// SPDX-License-Identifier: GPL-2.0
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
#include <linux/vmalloc.h>

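/*
 * Per-CPU depth of the kmap_atomic() stack; kmap_atomic()/kunmap_atomic()
 * use it to pick the next fixmap slot for temporary atomic mappings.
 */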
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
DEFINE_PER_CPU(int, __kmap_atomic_idx);
#endif

/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
#ifdef CONFIG_HIGHMEM

/*
 * Architectures with an aliasing data cache may define the following family
 * of helper functions in their asm/highmem.h to control the cache color of
 * virtual addresses where physical memory pages are mapped by kmap.
 */
#ifndef get_pkmap_color

/*
 * Determine the color of the virtual address where the page should be mapped.
 */
static inline unsigned int get_pkmap_color(struct page *page)
{
	return 0;
}
#define get_pkmap_color get_pkmap_color

/*
 * Get next index for mapping inside PKMAP region for page with given color.
 */
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	static unsigned int last_pkmap_nr;

	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
	return last_pkmap_nr;
}

/*
 * Determine if page index inside PKMAP region (pkmap_nr) of given color
 * has wrapped around PKMAP region end. When this happens an attempt to
 * flush all unused PKMAP slots is made.
 */
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr == 0;
}

/*
 * Get the number of PKMAP entries of the given color. If no free slot is
 * found after checking that many entries, kmap will sleep waiting for
 * someone to call kunmap and free a PKMAP slot.
 */
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP;
}

/*
 * Get head of a wait queue for PKMAP entries of the given color.
 * Wait queues for different mapping colors should be independent to avoid
 * unnecessary wakeups caused by freeing of slots of other colors.
 */
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

	return &pkmap_map_wait;
}
#endif

atomic_long_t _totalhigh_pages __read_mostly;
EXPORT_SYMBOL(_totalhigh_pages);

EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);

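/*
 * nr_free_highpages - return the total number of free pages, summed over
 * every populated highmem zone in the system.
 */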
unsigned int nr_free_highpages(void)
{
	struct zone *zone;
	unsigned int pages = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	return pages;
}

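/*
 * pkmap_count[] follows the counting rules described above; kmap_lock
 * serializes all updates to it and to pkmap_page_table, the kernel page
 * table covering the PKMAP virtual address range.
 */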
static int pkmap_count[LAST_PKMAP];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the disabling of IRQs out of the locking in that case to avoid
 * potentially useless overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()		spin_lock_irq(&kmap_lock)
#define unlock_kmap()		spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)	spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)	spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()		spin_lock(&kmap_lock)
#define unlock_kmap()		spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)	\
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)	\
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif

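/*
 * kmap_to_page - return the struct page backing a kmap'ed (or lowmem)
 * virtual address. Addresses inside the PKMAP window are looked up in
 * pkmap_page_table; anything else is treated as a lowmem address.
 */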
struct page *kmap_to_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
		int i = PKMAP_NR(addr);
		return pte_page(pkmap_page_table[i]);
	}

	return virt_to_page(addr);
}
EXPORT_SYMBOL(kmap_to_page);

static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

/**
 * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
 */
void kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}

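/*
 * map_new_virtual - find a free PKMAP slot for @page, flushing stale
 * entries or sleeping on the color's wait queue if none is available,
 * then install the mapping and return its virtual address. Called with
 * kmap_lock held; may drop and re-take it while sleeping.
 */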
static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;
	unsigned int last_pkmap_nr;
	unsigned int color = get_pkmap_color(page);

start:
	count = get_pkmap_entries_count(color);
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = get_next_pkmap_nr(color);
		if (no_more_pkmaps(last_pkmap_nr, color)) {
			flush_all_zero_pkmaps();
			count = get_pkmap_entries_count(color);
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep for somebody else to unmap their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);
			wait_queue_head_t *pkmap_map_wait =
				get_pkmap_wait_queue_head(color);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *)vaddr;
}

EXPORT_SYMBOL(kmap_high);
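
/*
 * Illustrative usage (not part of this file): callers normally reach
 * kmap_high()/kunmap_high() through the generic kmap()/kunmap() wrappers
 * when the page is in highmem:
 *
 *	void *addr = kmap(page);	/- may sleep; returns a kernel vaddr -/
 *	memcpy(addr, buf, PAGE_SIZE);	/- access the page via the mapping  -/
 *	kunmap(page);			/- drop the reference kmap() took   -/
 */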

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists. If and only if a non-NULL address is returned then a
 * matching call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *)vaddr;
}
#endif

/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;
	unsigned int color = get_pkmap_color(page);
	wait_queue_head_t *pkmap_map_wait;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock.  As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock.  Simply
		 * test if the queue is empty.
		 */
		pkmap_map_wait = get_pkmap_wait_queue_head(color);
		need_wakeup = waitqueue_active(pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(pkmap_map_wait);
}

EXPORT_SYMBOL(kunmap_high);
#endif /* CONFIG_HIGHMEM */

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

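/*
 * One statically allocated page_address_map per PKMAP slot;
 * set_page_address() indexes this array by the slot number of the
 * mapping's virtual address.
 */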
static struct page_address_map page_address_maps[LAST_PKMAP];

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}

EXPORT_SYMBOL(page_address);

/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				spin_unlock_irqrestore(&pas->lock, flags);
				goto done;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
done:
	return;
}

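/*
 * page_address_init - initialize every page_address_htable bucket's list
 * head and spinlock at boot, before any highmem page can be mapped.
 */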
void __init page_address_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */