/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <linux/highmem.h>

#include <drm/drm_cache.h>

#if defined(CONFIG_X86)
#include <asm/smp.h>

/*
 * clflushopt is an unordered instruction which needs fencing with mfence or
 * sfence to avoid ordering issues. For drm_clflush_page this fencing happens
 * in the caller.
 */
static void
drm_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;
	const int size = boot_cpu_data.x86_clflush_size;

	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page);
	for (i = 0; i < PAGE_SIZE; i += size)
		clflushopt(page_virtual + i);
	kunmap_atomic(page_virtual);
}

static void drm_cache_flush_clflush(struct page *pages[],
				    unsigned long num_pages)
{
	unsigned long i;

	mb(); /* Full memory barrier used before so that CLFLUSH is ordered. */
	for (i = 0; i < num_pages; i++)
		drm_clflush_page(*pages++);
	mb(); /* Also used after CLFLUSH so that all cache lines are flushed. */
}
#endif

/**
 * drm_clflush_pages - Flush dcache lines of a set of pages.
 * @pages: List of pages to be flushed.
 * @num_pages: Number of pages in the array.
 *
 * Flush every data cache line entry that points to an address belonging
 * to a page in the array.
 */
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		drm_cache_flush_clflush(pages, num_pages);
		return;
	}

	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");

#elif defined(__powerpc__)
	unsigned long i;

	for (i = 0; i < num_pages; i++) {
		struct page *page = pages[i];
		void *page_virtual;

		if (unlikely(page == NULL))
			continue;

		page_virtual = kmap_atomic(page);
		flush_dcache_range((unsigned long)page_virtual,
				   (unsigned long)page_virtual + PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
#else
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
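
/*
 * Example usage (hypothetical sketch, not part of this file): a driver that
 * has filled CPU-cached pages and is about to hand them to a device that
 * does not snoop the CPU caches could flush them before starting the DMA.
 * The array size and the two helpers below are illustrative names only:
 *
 *	struct page *pages[16];
 *	unsigned long num_pages = ARRAY_SIZE(pages);
 *
 *	fill_pages_with_cpu_writes(pages, num_pages);	(hypothetical helper)
 *	drm_clflush_pages(pages, num_pages);
 *	start_device_dma(pages, num_pages);		(hypothetical helper)
 */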

/**
 * drm_clflush_sg - Flush dcache lines pointing to a scatter-gather list.
 * @st: struct sg_table whose pages are to be flushed.
 *
 * Flush every data cache line entry that points to an address in the
 * sg table.
 */
void
drm_clflush_sg(struct sg_table *st)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		struct sg_page_iter sg_iter;

		mb(); /* CLFLUSH is ordered only by using memory barriers. */
		for_each_sgtable_page(st, &sg_iter, 0)
			drm_clflush_page(sg_page_iter_page(&sg_iter));
		mb(); /* Make sure that all cache line entries are flushed. */

		return;
	}

	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");
#else
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_sg);
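
/*
 * Example usage (hypothetical sketch): a buffer object backed by a
 * scatter-gather table, e.g. one built over shmem pages, can be flushed in
 * a single call. The object layout below is illustrative only:
 *
 *	struct my_gem_object {				(hypothetical struct)
 *		struct sg_table *sgt;
 *	};
 *
 *	drm_clflush_sg(obj->sgt);
 */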

/**
 * drm_clflush_virt_range - Flush dcache lines of a region
 * @addr: Initial kernel memory address.
 * @length: Region size.
 *
 * Flush every data cache line entry that points to an address in the
 * region requested.
 */
void
drm_clflush_virt_range(void *addr, unsigned long length)
{
#if defined(CONFIG_X86)
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		const int size = boot_cpu_data.x86_clflush_size;
		void *end = addr + length;

		addr = (void *)(((unsigned long)addr) & -size);
		mb(); /* CLFLUSH is only ordered with a full memory barrier. */
		for (; addr < end; addr += size)
			clflushopt(addr);
		clflushopt(end - 1); /* force serialisation */
		mb(); /* Ensure that every data cache line entry is flushed. */
		return;
	}

	if (wbinvd_on_all_cpus())
		pr_err("Timed out waiting for cache flush\n");
#else
	pr_err("Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_virt_range);
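
/*
 * Example usage (hypothetical sketch): flush a CPU-written region of a
 * kernel mapping, such as a command slot in a ring buffer, before a
 * non-coherent device reads it. The names below are illustrative only:
 *
 *	void *slot = ring_vaddr + offset;		(hypothetical mapping)
 *	size_t len = 256;
 *
 *	memcpy(slot, commands, len);
 *	drm_clflush_virt_range(slot, len);
 */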