xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/ttm/ttm_agp_backend.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *          Keith Packard.
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/agp_backend.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <asm/agp.h>

struct ttm_agp_backend {
	struct ttm_tt ttm;
	struct agp_memory *mem;
	struct agp_bridge_data *bridge;
};

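/*
 * Bind the pages backing @ttm into the AGP aperture at the offset described
 * by @bo_mem.  Unpopulated slots are filled with the global dummy read page;
 * a second call on an already-bound backend is a no-op returning 0.
 */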
int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
	struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
	struct drm_mm_node *node = bo_mem->mm_node;
	struct agp_memory *mem;
	int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
	unsigned i;

	if (agp_be->mem)
		return 0;

	mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
	if (unlikely(mem == NULL))
		return -ENOMEM;

	mem->page_count = 0;
	for (i = 0; i < ttm->num_pages; i++) {
		struct page *page = ttm->pages[i];

		if (!page)
			page = dummy_read_page;

		mem->pages[mem->page_count++] = page;
	}
	agp_be->mem = mem;

	mem->is_flushed = 1;
	mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;

	ret = agp_bind_memory(mem, node->start);
	if (ret)
		pr_err("AGP Bind memory failed\n");

	return ret;
}
EXPORT_SYMBOL(ttm_agp_bind);

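/*
 * Unbind the AGP memory from the aperture.  If the memory was never bound,
 * it is freed instead and the backend forgets about it.
 */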
void ttm_agp_unbind(struct ttm_tt *ttm)
{
	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);

	if (agp_be->mem) {
		if (agp_be->mem->is_bound) {
			agp_unbind_memory(agp_be->mem);
			return;
		}
		agp_free_memory(agp_be->mem);
		agp_be->mem = NULL;
	}
}
EXPORT_SYMBOL(ttm_agp_unbind);

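/*
 * Report whether AGP memory has been allocated for @ttm, i.e. whether
 * ttm_agp_bind() has attached an agp_memory to this backend.
 */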
bool ttm_agp_is_bound(struct ttm_tt *ttm)
{
	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);

	if (!ttm)
		return false;

	return (agp_be->mem != NULL);
}
EXPORT_SYMBOL(ttm_agp_is_bound);

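/*
 * Tear down an AGP-backed ttm_tt: unbind any AGP memory, finalize the
 * embedded ttm_tt and free the backend itself.
 */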
void ttm_agp_destroy(struct ttm_tt *ttm)
{
	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);

	if (agp_be->mem)
		ttm_agp_unbind(ttm);
	ttm_tt_fini(ttm);
	kfree(agp_be);
}
EXPORT_SYMBOL(ttm_agp_destroy);

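/*
 * Allocate and initialize an AGP-backed ttm_tt for @bo on the given AGP
 * @bridge.  Returns the embedded ttm_tt, or NULL on allocation failure.
 */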
struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
				 struct agp_bridge_data *bridge,
				 uint32_t page_flags)
{
	struct ttm_agp_backend *agp_be;

	agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
	if (!agp_be)
		return NULL;

	agp_be->mem = NULL;
	agp_be->bridge = bridge;

	if (ttm_tt_init(&agp_be->ttm, bo, page_flags)) {
		kfree(agp_be);
		return NULL;
	}

	return &agp_be->ttm;
}
EXPORT_SYMBOL(ttm_agp_tt_create);
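
/*
 * Usage sketch (illustrative only, not part of this file): an AGP-capable
 * TTM driver is expected to call ttm_agp_tt_create() from its ttm_tt_create
 * hook and route bind/unbind/destroy to the helpers above.  The function
 * example_bo_to_bridge() below is a hypothetical placeholder for however a
 * driver looks up its struct agp_bridge_data.
 */
#if 0	/* example only, never compiled */
static struct ttm_tt *example_ttm_tt_create(struct ttm_buffer_object *bo,
					    uint32_t page_flags)
{
	/* Hypothetical lookup of the AGP bridge associated with this BO. */
	struct agp_bridge_data *bridge = example_bo_to_bridge(bo);

	return ttm_agp_tt_create(bo, bridge, page_flags);
}
#endif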