xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Huge page-table-entry support for IO memory.
 *
 * Copyright (C) 2007-2019 VMware, Inc. All rights reserved.
 */
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/**
 * struct vmw_thp_manager - Range manager implementing huge page alignment
 *
 * @manager: The base TTM resource manager.
 * @mm: The underlying range manager. Protected by @lock.
 * @lock: Manager lock.
 */
struct vmw_thp_manager {
	struct ttm_resource_manager manager;
	struct drm_mm mm;
	spinlock_t lock;
};

static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
{
	return container_of(man, struct vmw_thp_manager, manager);
}

static const struct ttm_resource_manager_func vmw_thp_func;

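/*
 * Try an allocation at the huge-page alignment @align_pages, provided it is
 * compatible with the buffer's own requested alignment. Returns -ENOSPC if
 * the alignments are incompatible or no suitable hole exists, so the caller
 * can fall back to a smaller alignment.
 */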
static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
				  unsigned long align_pages,
				  const struct ttm_place *place,
				  struct ttm_resource *mem,
				  unsigned long lpfn,
				  enum drm_mm_insert_mode mode)
{
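	/* A zero page_alignment means the buffer accepts any alignment. */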
	if (align_pages >= mem->page_alignment &&
	    (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
		return drm_mm_insert_node_in_range(mm, node,
						   mem->num_pages,
						   align_pages, 0,
						   place->fpfn, lpfn, mode);
	}

	return -ENOSPC;
}

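/*
 * Allocate a VRAM range for @bo, preferring start addresses that allow the
 * range to be mapped with huge page table entries: PUD-aligned first where
 * supported, then PMD-aligned, and finally the plain requested alignment.
 */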
static int vmw_thp_get_node(struct ttm_resource_manager *man,
			    struct ttm_buffer_object *bo,
			    const struct ttm_place *place,
			    struct ttm_resource *mem)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);
	struct drm_mm *mm = &rman->mm;
	struct drm_mm_node *node;
	unsigned long align_pages;
	unsigned long lpfn;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

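	/* All drm_mm manipulation below is serialized by the manager lock. */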
	spin_lock(&rman->lock);
	/* First try PUD-sized alignment, where the architecture supports it. */
	if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
		align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
		if (mem->num_pages >= align_pages) {
			ret = vmw_thp_insert_aligned(mm, node, align_pages,
						     place, mem, lpfn, mode);
			if (!ret)
				goto found_unlock;
		}
	}

	/* Fall back to PMD-sized alignment (2 MiB with 4 KiB pages on x86-64). */
	align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
	if (mem->num_pages >= align_pages) {
		ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem,
					     lpfn, mode);
		if (!ret)
			goto found_unlock;
	}

	/* Finally, fall back to the alignment the buffer itself requested. */
	ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
					  mem->page_alignment, 0,
					  place->fpfn, lpfn, mode);
found_unlock:
	spin_unlock(&rman->lock);

	if (unlikely(ret)) {
		kfree(node);
	} else {
		mem->mm_node = node;
		mem->start = node->start;
	}

	return ret;
}

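/*
 * Free the range backing @mem, if one was allocated, and drop the
 * bookkeeping node.
 */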
static void vmw_thp_put_node(struct ttm_resource_manager *man,
			     struct ttm_resource *mem)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);

	if (mem->mm_node) {
		spin_lock(&rman->lock);
		drm_mm_remove_node(mem->mm_node);
		spin_unlock(&rman->lock);

		kfree(mem->mm_node);
		mem->mm_node = NULL;
	}
}

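/**
 * vmw_thp_init - Set up the huge-page-aware VRAM range manager
 * @dev_priv: Pointer to the device private structure.
 *
 * Creates the manager and registers it with TTM as the TTM_PL_VRAM
 * manager. Returns 0 on success or -ENOMEM on allocation failure.
 */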
int vmw_thp_init(struct vmw_private *dev_priv)
{
	struct vmw_thp_manager *rman;

	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
	if (!rman)
		return -ENOMEM;

	ttm_resource_manager_init(&rman->manager,
				  dev_priv->vram_size >> PAGE_SHIFT);

	rman->manager.func = &vmw_thp_func;
	drm_mm_init(&rman->mm, 0, rman->manager.size);
	spin_lock_init(&rman->lock);

	ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, &rman->manager);
	ttm_resource_manager_set_used(&rman->manager, true);
	return 0;
}

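/**
 * vmw_thp_fini - Tear down the huge-page-aware VRAM range manager
 * @dev_priv: Pointer to the device private structure.
 *
 * Evicts any remaining buffers, takes down the range manager and
 * unregisters it from TTM. Bails out early if eviction fails.
 */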
void vmw_thp_fini(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
	struct vmw_thp_manager *rman = to_thp_manager(man);
	struct drm_mm *mm = &rman->mm;
	int ret;

	ttm_resource_manager_set_used(man, false);

	ret = ttm_resource_manager_force_list_clean(&dev_priv->bdev, man);
	if (ret)
		return;
	spin_lock(&rman->lock);
	drm_mm_clean(mm);
	drm_mm_takedown(mm);
	spin_unlock(&rman->lock);
	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(&dev_priv->bdev, TTM_PL_VRAM, NULL);
	kfree(rman);
}

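/* Dump the range manager's current allocations through @printer. */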
static void vmw_thp_debug(struct ttm_resource_manager *man,
			  struct drm_printer *printer)
{
	struct vmw_thp_manager *rman = to_thp_manager(man);

	spin_lock(&rman->lock);
	drm_mm_print(&rman->mm, printer);
	spin_unlock(&rman->lock);
}

static const struct ttm_resource_manager_func vmw_thp_func = {
	.alloc = vmw_thp_get_node,
	.free = vmw_thp_put_node,
	.debug = vmw_thp_debug
};