/*
 * Copyright (c) Red Hat Inc.

 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <linux/atomic.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_set_memory.h>

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		16
#define FREE_ALL_PAGES			(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL		1000
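
/*
 * With 4 KiB pages and 8-byte pointers (the typical 64-bit config),
 * NUM_PAGES_TO_ALLOC is 4096 / 8 = 512: one page worth of struct page
 * pointers, so the temporary arrays used below for batched allocation
 * and freeing fit in a single page.
 */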

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 * @name: Pool name, shown in debugfs.
 * @nfrees: Statistics counter of pages freed from this pool.
 * @nrefills: Statistics counter of pool refills.
 * @order: Allocation order (0 for single pages, HPAGE_PMD_ORDER for the
 * huge page pools).
 */
struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
	gfp_t			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
	unsigned int		order;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 6

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @kobj: sysfs kobject through which the pool limits are tuned; the manager
 * is freed from its release callback.
 * @mm_shrink: Shrinker that lets the mm reclaim pages from the pools. It only
 * does work when there are some pages to free.
 * @options: Tunable limits for the pools (see struct ttm_pool_opts).
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
			struct ttm_page_pool	wc_pool_huge;
			struct ttm_page_pool	uc_pool_huge;
		};
	};
};
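
/*
 * The anonymous union above makes pools[] and the named members aliases:
 * pools[0] is wc_pool, pools[1] is uc_pool, pools[2] and pools[3] are the
 * dma32 variants, and pools[4] and pools[5] the huge variants.
 * ttm_get_pool() below relies on exactly this ordering when it builds its
 * pool index.
 */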

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
		struct attribute *attr, const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
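	/* e.g. with 4 KiB pages PAGE_SIZE >> 10 == 4, so a sysfs write of
	 * "16384" (KiB) becomes 16384 / 4 == 4096 pages. */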
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
					  enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32) {
		if (huge)
			return NULL;
		pool_index |= 0x2;

	} else if (huge) {
		pool_index |= 0x4;
	}

	return &_manager->pools[pool_index];
}
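
/*
 * The resulting index encoding: bit 0 selects uncached (1) versus
 * write-combined (0), bit 1 the dma32 pools and bit 2 the huge pools.
 * E.g. TTM_PAGE_FLAG_DMA32 with cstate == tt_uncached yields index 0x3,
 * i.e. uc_pool_dma32. Cached (tt_cached) pages never come from a pool.
 */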

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages,
		unsigned int order)
{
	unsigned int i, pages_nr = (1 << order);

	if (order == 0) {
		if (ttm_set_pages_array_wb(pages, npages))
			pr_err("Failed to set %d pages to wb!\n", npages);
	}

	for (i = 0; i < npages; ++i) {
		if (order > 0) {
			if (ttm_set_pages_wb(pages[i], pages_nr))
				pr_err("Failed to set %d pages to wb!\n", pages_nr);
		}
		__free_pages(pages[i], order);
	}
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees everything
 * @use_static: Safe to use the static buffer
 *
 * Returns the number of requested pages that could not be freed.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
			      bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	if (use_static)
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc_array(npages_to_free,
					      sizeof(struct page *),
					      GFP_KERNEL);
	if (!pages_to_free) {
		pr_debug("Failed to allocate memory for pool free operation\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/*
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages, pool->order);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* more pages to free, restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages, pool->order);
out:
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return nr_free;
}

/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * This code is crying out for a shrinker per pool....
 */
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static DEFINE_MUTEX(lock);
	static unsigned start_pool;
	unsigned i;
	unsigned pool_offset;
	struct ttm_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned int nr_free_pool;

	if (!mutex_trylock(&lock))
		return SHRINK_STOP;
	pool_offset = ++start_pool % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		unsigned page_nr;

		if (shrink_pages == 0)
			break;

		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		page_nr = (1 << pool->order);
		/* OK to use static buffer since global mutex is held. */
		nr_free_pool = roundup(nr_free, page_nr) >> pool->order;
		shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true);
		freed += (nr_free_pool - shrink_pages) << pool->order;
		if (freed >= sc->nr_to_scan)
			break;
		shrink_pages <<= pool->order;
	}
	mutex_unlock(&lock);
	return freed;
}
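
/*
 * The order handling above converts between pages and pool entries: for
 * a huge pool with order HPAGE_PMD_ORDER (9 on x86-64 with 4 KiB pages,
 * i.e. 512 pages per entry), a request to scan 1000 pages becomes
 * roundup(1000, 512) >> 9 == 2 pool entries, and the number of entries
 * actually freed is shifted back up by the order to report pages.
 */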

static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned i;
	unsigned long count = 0;
	struct ttm_page_pool *pool;

	for (i = 0; i < NUM_POOLS; ++i) {
		pool = &_manager->pools[i];
		count += (pool->npages << pool->order);
	}

	return count;
}

static int ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	return register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;

	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = ttm_set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = ttm_set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change their caching state. Any pages that
 * have already changed their caching state are put back in the pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;

	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
			       int ttm_flags, enum ttm_caching_state cstate,
			       unsigned count, unsigned order)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, j, cpages;
	unsigned npages = 1 << order;
	unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);

	/* allocate array for page caching change */
	caching_array = kmalloc_array(max_cpages, sizeof(struct page *),
				      GFP_KERNEL);
	if (!caching_array) {
		pr_debug("Unable to allocate table for new pages\n");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_pages(gfp_flags, order);

		if (!p) {
			pr_debug("Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

		list_add(&p->lru, pages);

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (PageHighMem(p))
			continue;

#endif
		for (j = 0; j < npages; ++j) {
			caching_array[cpages++] = p++;
			if (cpages == max_cpages) {
				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
				      enum ttm_caching_state cstate,
				      unsigned count, unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;

	/*
	 * Only allow one pool fill operation at a time.
	 * If the pool doesn't have enough pages for the allocation, new pages
	 * are allocated from outside of the pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If the allocation request is small and there are not enough
	 * pages in the pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/*
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
					cstate, alloc_size, 0);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			pr_debug("Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them in the pool. */
			list_for_each_entry(p, &new_pages, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}
	}
	pool->fill_lock = false;
}
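
/*
 * Note that fill_lock is a plain flag rather than a lock: it is only read
 * and written under pool->lock, and exists so that the fill above, which
 * temporarily drops pool->lock around ttm_alloc_new_pages(), cannot be
 * re-entered by a second allocator in the meantime.
 */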

/**
 * Allocate pages from the pool and put them on the return list.
 *
 * @return zero for success or negative error code.
 */
static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
				   struct list_head *pages,
				   int ttm_flags,
				   enum ttm_caching_state cstate,
				   unsigned count, unsigned order)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;
	int r = 0;

	spin_lock_irqsave(&pool->lock, irq_flags);
	if (!order)
		ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
					  &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Find the last page to include for the requested number of pages.
	 * Walk the list from whichever end is closer to halve the average
	 * search length. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	/* clear the pages coming from the pool if requested */
	if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		struct page *page;

		list_for_each_entry(page, pages, lru) {
			if (PageHighMem(page))
				clear_highpage(page);
			else
				clear_page(page_address(page));
		}
	}

	/* If the pool didn't have enough pages allocate new ones. */
	if (count) {
		gfp_t gfp_flags = pool->gfp_flags;

		/* set zero flag for page allocation if required */
		if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
			gfp_flags |= __GFP_ZERO;

		if (ttm_flags & TTM_PAGE_FLAG_NO_RETRY)
			gfp_flags |= __GFP_RETRY_MAYFAIL;

		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 */
		r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
					count, order);
	}

	return r;
}

/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
#endif
	unsigned long irq_flags;
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		i = 0;
		while (i < npages) {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			struct page *p = pages[i];
#endif
			unsigned order = 0, j;

			if (!pages[i]) {
				++i;
				continue;
			}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			if (!(flags & TTM_PAGE_FLAG_DMA32) &&
			    (npages - i) >= HPAGE_PMD_NR) {
				for (j = 1; j < HPAGE_PMD_NR; ++j)
					if (++p != pages[i + j])
						break;

				if (j == HPAGE_PMD_NR)
					order = HPAGE_PMD_ORDER;
			}
#endif

			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			__free_pages(pages[i], order);

			j = 1 << order;
			while (j) {
				pages[i++] = NULL;
				--j;
			}
		}
		return;
	}

	i = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (huge) {
		unsigned max_size, n2free;

		spin_lock_irqsave(&huge->lock, irq_flags);
		while ((npages - i) >= HPAGE_PMD_NR) {
			struct page *p = pages[i];
			unsigned j;

			if (!p)
				break;

			for (j = 1; j < HPAGE_PMD_NR; ++j)
				if (++p != pages[i + j])
					break;

			if (j != HPAGE_PMD_NR)
				break;

			list_add_tail(&pages[i]->lru, &huge->list);

			for (j = 0; j < HPAGE_PMD_NR; ++j)
				pages[i++] = NULL;
			huge->npages++;
		}

		/* Check that we don't go over the pool limit */
		max_size = _manager->options.max_size;
		max_size /= HPAGE_PMD_NR;
		if (huge->npages > max_size)
			n2free = huge->npages - max_size;
		else
			n2free = 0;
		spin_unlock_irqrestore(&huge->lock, irq_flags);
		if (n2free)
			ttm_page_pool_free(huge, n2free, false);
	}
#endif

	spin_lock_irqsave(&pool->lock, irq_flags);
	while (i < npages) {
		if (pages[i]) {
			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			list_add_tail(&pages[i]->lru, &pool->list);
			pages[i] = NULL;
			pool->npages++;
		}
		++i;
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages, false);
}

/*
 * On success the pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
#endif
	struct list_head plist;
	struct page *p = NULL;
	unsigned count, first;
	int r;

	/* No pool for cached pages */
	if (pool == NULL) {
		gfp_t gfp_flags = GFP_USER;
		unsigned i;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		unsigned j;
#endif

		/* set zero flag for page allocation if required */
		if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
			gfp_flags |= __GFP_ZERO;

		if (flags & TTM_PAGE_FLAG_NO_RETRY)
			gfp_flags |= __GFP_RETRY_MAYFAIL;

		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		i = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		if (!(gfp_flags & GFP_DMA32)) {
			while (npages >= HPAGE_PMD_NR) {
				gfp_t huge_flags = gfp_flags;

				huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
					__GFP_KSWAPD_RECLAIM;
				huge_flags &= ~__GFP_MOVABLE;
				huge_flags &= ~__GFP_COMP;
				p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
				if (!p)
					break;

				for (j = 0; j < HPAGE_PMD_NR; ++j)
					pages[i++] = p++;

				npages -= HPAGE_PMD_NR;
			}
		}
#endif

		first = i;
		while (npages) {
			p = alloc_page(gfp_flags);
			if (!p) {
				pr_debug("Unable to allocate page\n");
				return -ENOMEM;
			}

			/* Swap the pages if we detect consecutive order */
			if (i > first && pages[i - 1] == p - 1)
				swap(p, pages[i - 1]);

			pages[i++] = p;
			--npages;
		}
		return 0;
	}

	count = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (huge && npages >= HPAGE_PMD_NR) {
		INIT_LIST_HEAD(&plist);
		ttm_page_pool_get_pages(huge, &plist, flags, cstate,
					npages / HPAGE_PMD_NR,
					HPAGE_PMD_ORDER);

		list_for_each_entry(p, &plist, lru) {
			unsigned j;

			for (j = 0; j < HPAGE_PMD_NR; ++j)
				pages[count++] = &p[j];
		}
	}
#endif

	INIT_LIST_HEAD(&plist);
	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
				    npages - count, 0);

	first = count;
	list_for_each_entry(p, &plist, lru) {
		struct page *tmp = p;

		/* Swap the pages if we detect consecutive order */
		if (count > first && pages[count - 1] == tmp - 1)
			swap(tmp, pages[count - 1]);
		pages[count++] = tmp;
	}

	if (r) {
		/* If there are any pages in the list put them back in
		 * the pool.
		 */
		pr_debug("Failed to allocate extra pages for large request\n");
		ttm_put_pages(pages, count, flags, cstate);
		return r;
	}

	return 0;
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
		char *name, unsigned int order)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
	pool->order = order;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	unsigned order = HPAGE_PMD_ORDER;
#else
	unsigned order = 0;
#endif

	WARN_ON(_manager);

	pr_info("Initializing pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0);

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0);

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma", 0);

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma", 0);

	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
				   __GFP_KSWAPD_RECLAIM) &
				  ~(__GFP_MOVABLE | __GFP_COMP),
				  "wc huge", order);

	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
				  (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
				   __GFP_KSWAPD_RECLAIM) &
				  ~(__GFP_MOVABLE | __GFP_COMP),
				  "uc huge", order);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0))
		goto error;

	ret = ttm_pool_mm_shrink_init(_manager);
	if (unlikely(ret != 0))
		goto error;
	return 0;

error:
	kobject_put(&_manager->kobj);
	_manager = NULL;
	return ret;
}

void ttm_page_alloc_fini(void)
{
	int i;

	pr_info("Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	/* OK to use static buffer since global mutex is no longer used. */
	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}

static void
ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
{
	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
	unsigned i;

	if (mem_count_update == 0)
		goto put_pages;

	for (i = 0; i < mem_count_update; ++i) {
		if (!ttm->pages[i])
			continue;

		ttm_mem_global_free_page(mem_glob, ttm->pages[i], PAGE_SIZE);
	}

put_pages:
	ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
		      ttm->caching_state);
	ttm_tt_set_unpopulated(ttm);
}

int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
	unsigned i;
	int ret;

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (ttm_check_under_lowerlimit(mem_glob, ttm->num_pages, ctx))
		return -ENOMEM;

	ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
			    ttm->caching_state);
	if (unlikely(ret != 0)) {
		ttm_pool_unpopulate_helper(ttm, 0);
		return ret;
	}

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						PAGE_SIZE, ctx);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate_helper(ttm, i);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm_tt_set_populated(ttm);
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate_helper(ttm, ttm->num_pages);
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
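
/*
 * Illustrative use (a sketch, not taken from this file; the driver name
 * and hook are hypothetical): a driver that needs no per-device DMA
 * handling can back its ttm_tt directly with these helpers, e.g.
 *
 *	static int mydrv_ttm_populate(struct ttm_tt *ttm,
 *				      struct ttm_operation_ctx *ctx)
 *	{
 *		return ttm_pool_populate(ttm, ctx);
 *	}
 *
 * with ttm_pool_unpopulate() as the matching unpopulate hook. Drivers
 * that must also map the pages for DMA use
 * ttm_populate_and_map_pages() below instead.
 */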

int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
					struct ttm_operation_ctx *ctx)
{
	unsigned i, j;
	int r;

	r = ttm_pool_populate(&tt->ttm, ctx);
	if (r)
		return r;

	for (i = 0; i < tt->ttm.num_pages; ++i) {
		struct page *p = tt->ttm.pages[i];
		size_t num_pages = 1;

		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
			if (++p != tt->ttm.pages[j])
				break;

			++num_pages;
		}

		tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
						  0, num_pages * PAGE_SIZE,
						  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, tt->dma_address[i])) {
			while (i--) {
				dma_unmap_page(dev, tt->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				tt->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(&tt->ttm);
			return -EFAULT;
		}

		for (j = 1; j < num_pages; ++j) {
			tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
			++i;
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_populate_and_map_pages);
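
/*
 * The mapping loop above coalesces physically contiguous runs: when
 * pages[i..i+n) are consecutive struct pages, a single dma_map_page()
 * call covers n * PAGE_SIZE bytes and the remaining dma_address slots
 * of the run are filled in by adding PAGE_SIZE per page. The unmap path
 * below detects the same runs, so each mapping is released with one
 * dma_unmap_page() of the matching length.
 */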

void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
{
	unsigned i, j;

	for (i = 0; i < tt->ttm.num_pages;) {
		struct page *p = tt->ttm.pages[i];
		size_t num_pages = 1;

		if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
			++i;
			continue;
		}

		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
			if (++p != tt->ttm.pages[j])
				break;

			++num_pages;
		}

		dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

		i += num_pages;
	}
	ttm_pool_unpopulate(&tt->ttm);
}
EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%7s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%7s %12ld %13ld %8d\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);