// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"

#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/genalloc.h>

#define HL_MMU_DEBUG	0
/*
 * The va ranges in context object contain a list with the available chunks of
 * device virtual memory.
 * There is one range for host allocations and one for DRAM allocations.
 *
 * On initialization each range contains one chunk of all of its available
 * virtual range which is a half of the total device virtual range.
 *
 * On each mapping of physical pages, a suitable virtual range chunk (with a
 * minimum size) is selected from the list. If the chunk size equals the
 * requested size, the chunk is returned. Otherwise, the chunk is split into
 * two chunks - one to return as result and a remainder to stay in the list.
 *
 * On each unmapping of a virtual address, the relevant virtual chunk is
 * returned to the list. The chunk is added to the list and if its edges match
 * the edges of the adjacent chunks (meaning a contiguous chunk can be
 * created), the chunks are merged.
 *
 * On finish, the list is checked to have only one chunk of all the relevant
 * virtual range (which is a half of the device total virtual range).
 * If not (meaning not all mappings were unmapped), a warning is printed.
 */
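
/*
 * Illustrative example of the chunk bookkeeping above (addresses are
 * hypothetical): starting from a single free chunk [0x0, 0x2fff], mapping
 * 0x1000 bytes splits it into the returned [0x0, 0xfff] and a remaining
 * [0x1000, 0x2fff]. Unmapping [0x0, 0xfff] later merges the two back into
 * the single original chunk.
 */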

/*
 * alloc_device_memory - allocate device memory
 *
 * @ctx: current context
 * @args: host parameters containing the requested size
 * @ret_handle: result handle
 *
 * This function does the following:
 * - Allocate the requested size rounded up to 2MB pages
 * - Return unique handle
 */
static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
                                u32 *ret_handle)
{
        struct hl_device *hdev = ctx->hdev;
        struct hl_vm *vm = &hdev->vm;
        struct hl_vm_phys_pg_pack *phys_pg_pack;
        u64 paddr = 0, total_size, num_pgs, i;
        u32 num_curr_pgs, page_size, page_shift;
        int handle, rc;
        bool contiguous;

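        /*
         * Illustrative (assuming a 2MB DRAM page): page_size = 0x200000 and
         * page_shift = 21, so a 3MB request rounds up to num_pgs = 2 and
         * total_size = 4MB.
         */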
        num_curr_pgs = 0;
        page_size = hdev->asic_prop.dram_page_size;
        page_shift = __ffs(page_size);
        num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift;
        total_size = num_pgs << page_shift;

        if (!total_size) {
                dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
                return -EINVAL;
        }

        contiguous = args->flags & HL_MEM_CONTIGUOUS;

        if (contiguous) {
                paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
                if (!paddr) {
                        dev_err(hdev->dev,
                                "failed to allocate %llu contiguous pages with total size of %llu\n",
                                num_pgs, total_size);
                        return -ENOMEM;
                }
        }

        phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
        if (!phys_pg_pack) {
                rc = -ENOMEM;
                goto pages_pack_err;
        }

        phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
        phys_pg_pack->asid = ctx->asid;
        phys_pg_pack->npages = num_pgs;
        phys_pg_pack->page_size = page_size;
        phys_pg_pack->total_size = total_size;
        phys_pg_pack->flags = args->flags;
        phys_pg_pack->contiguous = contiguous;

        phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
                rc = -ENOMEM;
                goto pages_arr_err;
        }

        if (phys_pg_pack->contiguous) {
                for (i = 0 ; i < num_pgs ; i++)
                        phys_pg_pack->pages[i] = paddr + i * page_size;
        } else {
                for (i = 0 ; i < num_pgs ; i++) {
                        phys_pg_pack->pages[i] = (u64) gen_pool_alloc(
                                                        vm->dram_pg_pool,
                                                        page_size);
                        if (!phys_pg_pack->pages[i]) {
                                dev_err(hdev->dev,
                                        "Failed to allocate device memory (out of memory)\n");
                                rc = -ENOMEM;
                                goto page_err;
                        }

                        num_curr_pgs++;
                }
        }

        spin_lock(&vm->idr_lock);
        handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
                                GFP_ATOMIC);
        spin_unlock(&vm->idr_lock);

        if (handle < 0) {
                dev_err(hdev->dev, "Failed to get handle for page\n");
                rc = -EFAULT;
                goto idr_err;
        }

        for (i = 0 ; i < num_pgs ; i++)
                kref_get(&vm->dram_pg_pool_refcount);

        phys_pg_pack->handle = handle;

        atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
        atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);

        *ret_handle = handle;

        return 0;

idr_err:
page_err:
        if (!phys_pg_pack->contiguous)
                for (i = 0 ; i < num_curr_pgs ; i++)
                        gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
                                        page_size);

        kvfree(phys_pg_pack->pages);
pages_arr_err:
        kfree(phys_pg_pack);
pages_pack_err:
        if (contiguous)
                gen_pool_free(vm->dram_pg_pool, paddr, total_size);

        return rc;
}
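
/*
 * Illustrative: with a 2MB DRAM page size, a 5MB HL_MEM_CONTIGUOUS request
 * becomes one 6MB gen_pool allocation recorded as three consecutive 2MB
 * pages; without the flag, three independent 2MB pages are allocated
 * instead.
 */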

/*
 * dma_map_host_va - DMA mapping of the given host virtual address.
 * @hdev: habanalabs device structure
 * @addr: the host virtual address of the memory area
 * @size: the size of the memory area
 * @p_userptr: pointer to result userptr structure
 *
 * This function does the following:
 * - Allocate userptr structure
 * - Pin the given host memory using the userptr structure
 * - Perform DMA mapping to have the DMA addresses of the pages
 */
static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
                                struct hl_userptr **p_userptr)
{
        struct hl_userptr *userptr;
        int rc;

        userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
        if (!userptr) {
                rc = -ENOMEM;
                goto userptr_err;
        }

        rc = hl_pin_host_memory(hdev, addr, size, userptr);
        if (rc) {
                dev_err(hdev->dev, "Failed to pin host memory\n");
                goto pin_err;
        }

        rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl,
                                userptr->sgt->nents, DMA_BIDIRECTIONAL);
        if (rc) {
                dev_err(hdev->dev, "failed to map sgt with DMA region\n");
                goto dma_map_err;
        }

        userptr->dma_mapped = true;
        userptr->dir = DMA_BIDIRECTIONAL;
        userptr->vm_type = VM_TYPE_USERPTR;

        *p_userptr = userptr;

        return 0;

dma_map_err:
        hl_unpin_host_memory(hdev, userptr);
pin_err:
        kfree(userptr);
userptr_err:

        return rc;
}

/*
 * dma_unmap_host_va - DMA unmapping of the given host virtual address.
 * @hdev: habanalabs device structure
 * @userptr: userptr to free
 *
 * This function does the following:
 * - Unpins the physical pages
 * - Frees the userptr structure
 */
static void dma_unmap_host_va(struct hl_device *hdev,
                                struct hl_userptr *userptr)
{
        hl_unpin_host_memory(hdev, userptr);
        kfree(userptr);
}
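
/*
 * Typical usage of the pair above (illustrative sketch, not a call site in
 * this file; "addr" and "size" stand for a pinned user buffer):
 *
 *        struct hl_userptr *userptr;
 *        int rc = dma_map_host_va(hdev, addr, size, &userptr);
 *
 *        if (rc)
 *                return rc;
 *        ... walk userptr->sgt to program the device MMU ...
 *        dma_unmap_host_va(hdev, userptr);
 */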

/*
 * dram_pg_pool_do_release - free DRAM pages pool
 *
 * @ref: pointer to reference object
 *
 * This function does the following:
 * - Frees the idr structure of physical pages handles
 * - Frees the generic pool of DRAM physical pages
 */
static void dram_pg_pool_do_release(struct kref *ref)
{
        struct hl_vm *vm = container_of(ref, struct hl_vm,
                        dram_pg_pool_refcount);

        /*
         * free the idr here as only here we know for sure that there are no
         * allocated physical pages and hence there are no handles in use
         */
        idr_destroy(&vm->phys_pg_pack_handles);
        gen_pool_destroy(vm->dram_pg_pool);
}
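
/*
 * Note on the refcount model, derived from the call sites in this file:
 * alloc_device_memory() takes one dram_pg_pool_refcount reference per
 * allocated DRAM page, and free_phys_pg_pack() drops one per page, so
 * (together with the initialization-time reference taken outside this file)
 * the release above cannot run while any physical page is still allocated.
 */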

/*
 * free_phys_pg_pack - free physical page pack
 * @hdev: habanalabs device structure
 * @phys_pg_pack: physical page pack to free
 *
 * This function does the following:
 * - For DRAM memory only, iterate over the pack and free each physical block
 *   structure by returning it to the general pool
 * - Free the hl_vm_phys_pg_pack structure
 */
static void free_phys_pg_pack(struct hl_device *hdev,
                struct hl_vm_phys_pg_pack *phys_pg_pack)
{
        struct hl_vm *vm = &hdev->vm;
        u64 i;

        if (!phys_pg_pack->created_from_userptr) {
                if (phys_pg_pack->contiguous) {
                        gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
                                        phys_pg_pack->total_size);

                        for (i = 0; i < phys_pg_pack->npages ; i++)
                                kref_put(&vm->dram_pg_pool_refcount,
                                        dram_pg_pool_do_release);
                } else {
                        for (i = 0 ; i < phys_pg_pack->npages ; i++) {
                                gen_pool_free(vm->dram_pg_pool,
                                                phys_pg_pack->pages[i],
                                                phys_pg_pack->page_size);
                                kref_put(&vm->dram_pg_pool_refcount,
                                        dram_pg_pool_do_release);
                        }
                }
        }

        kvfree(phys_pg_pack->pages);
        kfree(phys_pg_pack);
}

/*
 * free_device_memory - free device memory
 *
 * @ctx: current context
 * @handle: handle of the memory chunk to free
 *
 * This function does the following:
 * - Free the device memory related to the given handle
 */
static int free_device_memory(struct hl_ctx *ctx, u32 handle)
{
        struct hl_device *hdev = ctx->hdev;
        struct hl_vm *vm = &hdev->vm;
        struct hl_vm_phys_pg_pack *phys_pg_pack;

        spin_lock(&vm->idr_lock);
        phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
        if (phys_pg_pack) {
                if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
                        dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
                                handle);
                        spin_unlock(&vm->idr_lock);
                        return -EINVAL;
                }

                /*
                 * must remove from idr before the freeing of the physical
                 * pages as the refcount of the pool is also the trigger of the
                 * idr destroy
                 */
                idr_remove(&vm->phys_pg_pack_handles, handle);
                spin_unlock(&vm->idr_lock);

                atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
                atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);

                free_phys_pg_pack(hdev, phys_pg_pack);
        } else {
                spin_unlock(&vm->idr_lock);
                dev_err(hdev->dev,
                        "free device memory failed, no match for handle %u\n",
                        handle);
                return -EINVAL;
        }

        return 0;
}

/*
 * clear_va_list_locked - free virtual addresses list
 *
 * @hdev: habanalabs device structure
 * @va_list: list of virtual addresses to free
 *
 * This function does the following:
 * - Iterate over the list and free each virtual address block
 *
 * This function should be called only when va_list lock is taken
 */
static void clear_va_list_locked(struct hl_device *hdev,
                struct list_head *va_list)
{
        struct hl_vm_va_block *va_block, *tmp;

        list_for_each_entry_safe(va_block, tmp, va_list, node) {
                list_del(&va_block->node);
                kfree(va_block);
        }
}

/*
 * print_va_list_locked - print virtual addresses list
 *
 * @hdev: habanalabs device structure
 * @va_list: list of virtual addresses to print
 *
 * This function does the following:
 * - Iterate over the list and print each virtual address block
 *
 * This function should be called only when va_list lock is taken
 */
static void print_va_list_locked(struct hl_device *hdev,
                struct list_head *va_list)
{
#if HL_MMU_DEBUG
        struct hl_vm_va_block *va_block;

        dev_dbg(hdev->dev, "print va list:\n");

        list_for_each_entry(va_block, va_list, node)
                dev_dbg(hdev->dev,
                        "va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
                        va_block->start, va_block->end, va_block->size);
#endif
}

/*
 * merge_va_blocks_locked - merge a virtual block if possible
 *
 * @hdev: pointer to the habanalabs device structure
 * @va_list: pointer to the virtual addresses block list
 * @va_block: virtual block to merge with adjacent blocks
 *
 * This function does the following:
 * - Merge the given block with its adjacent blocks if their virtual ranges
 *   create a contiguous virtual range
 *
 * This function should be called only when va_list lock is taken
 */
static void merge_va_blocks_locked(struct hl_device *hdev,
                struct list_head *va_list, struct hl_vm_va_block *va_block)
{
        struct hl_vm_va_block *prev, *next;

        prev = list_prev_entry(va_block, node);
        if (&prev->node != va_list && prev->end + 1 == va_block->start) {
                prev->end = va_block->end;
                prev->size = prev->end - prev->start;
                list_del(&va_block->node);
                kfree(va_block);
                va_block = prev;
        }

        next = list_next_entry(va_block, node);
        if (&next->node != va_list && va_block->end + 1 == next->start) {
                next->start = va_block->start;
                next->size = next->end - next->start;
                list_del(&va_block->node);
                kfree(va_block);
        }
}
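
/*
 * Illustrative: returning [0x1000, 0x1fff] to a list that already holds
 * [0x0, 0xfff] and [0x2000, 0x2fff] triggers both branches above and
 * collapses all three blocks into a single [0x0, 0x2fff] chunk.
 */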

/*
 * add_va_block_locked - add a virtual block to the virtual addresses list
 *
 * @hdev: pointer to the habanalabs device structure
 * @va_list: pointer to the virtual addresses block list
 * @start: start virtual address
 * @end: end virtual address
 *
 * This function does the following:
 * - Add the given block to the virtual blocks list and merge with other
 *   blocks if a contiguous virtual block can be created
 *
 * This function should be called only when va_list lock is taken
 */
static int add_va_block_locked(struct hl_device *hdev,
                struct list_head *va_list, u64 start, u64 end)
{
        struct hl_vm_va_block *va_block, *res = NULL;
        u64 size = end - start;

        print_va_list_locked(hdev, va_list);

        list_for_each_entry(va_block, va_list, node) {
                /* TODO: remove upon matureness */
                if (hl_mem_area_crosses_range(start, size, va_block->start,
                                va_block->end)) {
                        dev_err(hdev->dev,
                                "block crossing ranges at start 0x%llx, end 0x%llx\n",
                                va_block->start, va_block->end);
                        return -EINVAL;
                }

                if (va_block->end < start)
                        res = va_block;
        }

        va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
        if (!va_block)
                return -ENOMEM;

        va_block->start = start;
        va_block->end = end;
        va_block->size = size;

        if (!res)
                list_add(&va_block->node, va_list);
        else
                list_add(&va_block->node, &res->node);

        merge_va_blocks_locked(hdev, va_list, va_block);

        print_va_list_locked(hdev, va_list);

        return 0;
}

/*
 * add_va_block - wrapper for add_va_block_locked
 *
 * @hdev: pointer to the habanalabs device structure
 * @va_range: pointer to the virtual addresses range
 * @start: start virtual address
 * @end: end virtual address
 *
 * This function does the following:
 * - Take the list lock and call add_va_block_locked
 */
static inline int add_va_block(struct hl_device *hdev,
                struct hl_va_range *va_range, u64 start, u64 end)
{
        int rc;

        mutex_lock(&va_range->lock);
        rc = add_va_block_locked(hdev, &va_range->list, start, end);
        mutex_unlock(&va_range->lock);

        return rc;
}

/*
 * get_va_block() - get a virtual block for the given size and alignment.
 * @hdev: pointer to the habanalabs device structure.
 * @va_range: pointer to the virtual addresses range.
 * @size: requested block size.
 * @hint_addr: hint for requested address by the user.
 * @va_block_align: required alignment of the virtual block start address.
 *
 * This function does the following:
 * - Iterate on the virtual block list to find a suitable virtual block for the
 *   given size and alignment.
 * - Reserve the requested block and update the list.
 * - Return the start address of the virtual block.
 */
static u64 get_va_block(struct hl_device *hdev, struct hl_va_range *va_range,
                        u64 size, u64 hint_addr, u32 va_block_align)
{
        struct hl_vm_va_block *va_block, *new_va_block = NULL;
        u64 valid_start, valid_size, prev_start, prev_end, align_mask,
                res_valid_start = 0, res_valid_size = 0;
        bool add_prev = false;

        align_mask = ~((u64)va_block_align - 1);

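        /*
         * Illustrative: for va_block_align = 2MB (0x200000), align_mask is
         * 0xffffffffffe00000, so valid_start below is rounded up to the next
         * 2MB boundary inside each candidate block.
         */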
        /* check if hint_addr is aligned */
        if (hint_addr & (va_block_align - 1))
                hint_addr = 0;

        mutex_lock(&va_range->lock);

        print_va_list_locked(hdev, &va_range->list);

        list_for_each_entry(va_block, &va_range->list, node) {
                /* calc the first possible aligned addr */
                valid_start = va_block->start;

                if (valid_start & (va_block_align - 1)) {
                        valid_start &= align_mask;
                        valid_start += va_block_align;
                        if (valid_start > va_block->end)
                                continue;
                }

                valid_size = va_block->end - valid_start;

                if (valid_size >= size &&
                        (!new_va_block || valid_size < res_valid_size)) {
                        new_va_block = va_block;
                        res_valid_start = valid_start;
                        res_valid_size = valid_size;
                }

                if (hint_addr && hint_addr >= valid_start &&
                                ((hint_addr + size) <= va_block->end)) {
                        new_va_block = va_block;
                        res_valid_start = hint_addr;
                        res_valid_size = valid_size;
                        break;
                }
        }

        if (!new_va_block) {
                dev_err(hdev->dev, "no available va block for size %llu\n",
                                size);
                goto out;
        }

        if (res_valid_start > new_va_block->start) {
                prev_start = new_va_block->start;
                prev_end = res_valid_start - 1;

                new_va_block->start = res_valid_start;
                new_va_block->size = res_valid_size;

                add_prev = true;
        }

        if (new_va_block->size > size) {
                new_va_block->start += size;
                new_va_block->size = new_va_block->end - new_va_block->start;
        } else {
                list_del(&new_va_block->node);
                kfree(new_va_block);
        }

        if (add_prev)
                add_va_block_locked(hdev, &va_range->list, prev_start,
                                prev_end);

        print_va_list_locked(hdev, &va_range->list);
out:
        mutex_unlock(&va_range->lock);

        return res_valid_start;
}
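
/*
 * Illustrative: with free chunks of 3MB and 8MB and an aligned 2MB request,
 * the selection loop above prefers the 3MB chunk (the smallest block that
 * still fits), and the roughly 1MB remainder stays on the list as its own
 * chunk.
 */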

/*
 * get_sg_info - get number of pages and the DMA address from SG list
 *
 * @sg: the SG list
 * @dma_addr: pointer to DMA address to return
 *
 * Calculate the number of consecutive pages described by the SG list. Take
 * the offset of the address in the first page, add to it the length and
 * round it up to the number of needed pages.
 */
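/*
 * Worked example (illustrative, 4KB PAGE_SIZE): a 10KB segment whose DMA
 * address starts 0x800 bytes into a page needs
 * (0x800 + 0x2800 + 0xfff) >> 12 = 3 pages.
 */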
static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
{
        *dma_addr = sg_dma_address(sg);

        return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
                        (PAGE_SIZE - 1)) >> PAGE_SHIFT;
}

/*
 * init_phys_pg_pack_from_userptr - initialize physical page pack from host
 *                                  memory
 * @ctx: current context
 * @userptr: userptr to initialize from
 * @pphys_pg_pack: result pointer
 *
 * This function does the following:
 * - Pin the physical pages related to the given virtual block
 * - Create a physical page pack from the physical pages related to the given
 *   virtual block
 */
static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
                                struct hl_userptr *userptr,
                                struct hl_vm_phys_pg_pack **pphys_pg_pack)
{
        struct hl_vm_phys_pg_pack *phys_pg_pack;
        struct scatterlist *sg;
        dma_addr_t dma_addr;
        u64 page_mask, total_npages;
        u32 npages, page_size = PAGE_SIZE,
                huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
        bool first = true, is_huge_page_opt = true;
        int rc, i, j;
        u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);

        phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
        if (!phys_pg_pack)
                return -ENOMEM;

        phys_pg_pack->vm_type = userptr->vm_type;
        phys_pg_pack->created_from_userptr = true;
        phys_pg_pack->asid = ctx->asid;
        atomic_set(&phys_pg_pack->mapping_cnt, 1);

        /* Only if all dma_addrs are aligned to 2MB and their
         * sizes are at least 2MB, we can use huge page mapping.
         * We limit the 2MB optimization to this condition,
         * since later on we acquire the related VA range as one
         * consecutive block.
         */
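        /*
         * Illustrative: with 4KB regular pages and 2MB huge pages,
         * pgs_in_huge_page = 512, so every SG entry must cover a multiple of
         * 512 pages and start on a 2MB-aligned DMA address for the huge page
         * optimization to hold.
         */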
        total_npages = 0;
        for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
                npages = get_sg_info(sg, &dma_addr);

                total_npages += npages;

                if ((npages % pgs_in_huge_page) ||
                                (dma_addr & (huge_page_size - 1)))
                        is_huge_page_opt = false;
        }

        if (is_huge_page_opt) {
                page_size = huge_page_size;
                do_div(total_npages, pgs_in_huge_page);
        }

        page_mask = ~(((u64) page_size) - 1);

        phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
                                                GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
                rc = -ENOMEM;
                goto page_pack_arr_mem_err;
        }

        phys_pg_pack->npages = total_npages;
        phys_pg_pack->page_size = page_size;
        phys_pg_pack->total_size = total_npages * page_size;

        j = 0;
        for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
                npages = get_sg_info(sg, &dma_addr);

                /* align down to physical page size and save the offset */
                if (first) {
                        first = false;
                        phys_pg_pack->offset = dma_addr & (page_size - 1);
                        dma_addr &= page_mask;
                }

                while (npages) {
                        phys_pg_pack->pages[j++] = dma_addr;
                        dma_addr += page_size;

                        if (is_huge_page_opt)
                                npages -= pgs_in_huge_page;
                        else
                                npages--;
                }
        }

        *pphys_pg_pack = phys_pg_pack;

        return 0;

page_pack_arr_mem_err:
        kfree(phys_pg_pack);

        return rc;
}

/*
 * map_phys_pg_pack - maps the physical page pack.
 * @ctx: current context
 * @vaddr: start address of the virtual area to map from
 * @phys_pg_pack: the pack of physical pages to map to
 *
 * This function does the following:
 * - Maps each chunk of virtual memory to matching physical chunk
 * - Returns 0 on success, error code otherwise; on failure, unmaps whatever
 *   was already mapped
 */
static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
                struct hl_vm_phys_pg_pack *phys_pg_pack)
{
        struct hl_device *hdev = ctx->hdev;
        u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
        u32 page_size = phys_pg_pack->page_size;
        int rc = 0;

        for (i = 0 ; i < phys_pg_pack->npages ; i++) {
                paddr = phys_pg_pack->pages[i];

                rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size,
                                (i + 1) == phys_pg_pack->npages);
                if (rc) {
                        dev_err(hdev->dev,
                                "map failed for handle %u, npages: %llu, mapped: %llu",
                                phys_pg_pack->handle, phys_pg_pack->npages,
                                mapped_pg_cnt);
                        goto err;
                }

                mapped_pg_cnt++;
                next_vaddr += page_size;
        }

        return 0;

err:
        next_vaddr = vaddr;
        for (i = 0 ; i < mapped_pg_cnt ; i++) {
                if (hl_mmu_unmap(ctx, next_vaddr, page_size,
                                        (i + 1) == mapped_pg_cnt))
                        dev_warn_ratelimited(hdev->dev,
                                "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
                                phys_pg_pack->handle, next_vaddr,
                                phys_pg_pack->pages[i], page_size);

                next_vaddr += page_size;
        }

        return rc;
}

/*
 * unmap_phys_pg_pack - unmaps the physical page pack
 * @ctx: current context
 * @vaddr: start address of the virtual area to unmap
 * @phys_pg_pack: the pack of physical pages to unmap
 */
static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
                struct hl_vm_phys_pg_pack *phys_pg_pack)
{
        struct hl_device *hdev = ctx->hdev;
        u64 next_vaddr, i;
        u32 page_size;

        page_size = phys_pg_pack->page_size;
        next_vaddr = vaddr;

        for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
                if (hl_mmu_unmap(ctx, next_vaddr, page_size,
                                (i + 1) == phys_pg_pack->npages))
                        dev_warn_ratelimited(hdev->dev,
                                "unmap failed for vaddr: 0x%llx\n", next_vaddr);

                /*
                 * unmapping on Palladium can be really long, so avoid a CPU
                 * soft lockup bug by sleeping a little between unmapping pages
                 */
                if (hdev->pldm)
                        usleep_range(500, 1000);
        }
}

static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
                u64 *paddr)
{
        struct hl_device *hdev = ctx->hdev;
        struct hl_vm *vm = &hdev->vm;
        struct hl_vm_phys_pg_pack *phys_pg_pack;
        u32 handle;

        handle = lower_32_bits(args->map_device.handle);
        spin_lock(&vm->idr_lock);
        phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
        if (!phys_pg_pack) {
                spin_unlock(&vm->idr_lock);
                dev_err(hdev->dev, "no match for handle %u\n", handle);
                return -EINVAL;
        }

        *paddr = phys_pg_pack->pages[0];

        spin_unlock(&vm->idr_lock);

        return 0;
}

/*
 * map_device_va - map the given memory
 *
 * @ctx: current context
 * @args: host parameters with handle/host virtual address
 * @device_addr: pointer to result device virtual address
 *
 * This function does the following:
 * - If given a physical device memory handle, map to a device virtual block
 *   and return the start address of this block
 * - If given a host virtual address and size, find the related physical pages,
 *   map a device virtual block to these pages and return the start address of
 *   this block
 */
static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
                u64 *device_addr)
{
        struct hl_device *hdev = ctx->hdev;
        struct hl_vm *vm = &hdev->vm;
        struct hl_vm_phys_pg_pack *phys_pg_pack;
        struct hl_userptr *userptr = NULL;
        struct hl_vm_hash_node *hnode;
        struct hl_va_range *va_range;
        enum vm_type_t *vm_type;
        u64 ret_vaddr, hint_addr;
        u32 handle = 0, va_block_align;
        int rc;
        bool is_userptr = args->flags & HL_MEM_USERPTR;

        /* Assume failure */
        *device_addr = 0;

        if (is_userptr) {
                u64 addr = args->map_host.host_virt_addr,
                        size = args->map_host.mem_size;
                u32 page_size = hdev->asic_prop.pmmu.page_size,
                        huge_page_size = hdev->asic_prop.pmmu_huge.page_size;

                rc = dma_map_host_va(hdev, addr, size, &userptr);
                if (rc) {
                        dev_err(hdev->dev, "failed to get userptr from va\n");
                        return rc;
                }

                rc = init_phys_pg_pack_from_userptr(ctx, userptr,
                                &phys_pg_pack);
                if (rc) {
                        dev_err(hdev->dev,
                                "unable to init page pack for vaddr 0x%llx\n",
                                addr);
                        goto init_page_pack_err;
                }

                vm_type = (enum vm_type_t *) userptr;
                hint_addr = args->map_host.hint_addr;
                handle = phys_pg_pack->handle;

                /* get required alignment */
                if (phys_pg_pack->page_size == page_size) {
                        va_range = ctx->host_va_range;

                        /*
                         * huge page alignment may be needed in case of regular
                         * page mapping, depending on the host VA alignment
                         */
                        if (addr & (huge_page_size - 1))
                                va_block_align = page_size;
                        else
                                va_block_align = huge_page_size;
                } else {
                        /*
                         * huge page alignment is needed in case of huge page
                         * mapping
                         */
                        va_range = ctx->host_huge_va_range;
                        va_block_align = huge_page_size;
                }
        } else {
                handle = lower_32_bits(args->map_device.handle);

                spin_lock(&vm->idr_lock);
                phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
                if (!phys_pg_pack) {
                        spin_unlock(&vm->idr_lock);
                        dev_err(hdev->dev,
                                "no match for handle %u\n", handle);
                        return -EINVAL;
                }

                /* increment now to avoid freeing device memory while mapping */
                atomic_inc(&phys_pg_pack->mapping_cnt);

                spin_unlock(&vm->idr_lock);

                vm_type = (enum vm_type_t *) phys_pg_pack;

                hint_addr = args->map_device.hint_addr;

                /* DRAM VA alignment is the same as the DRAM page size */
                va_range = ctx->dram_va_range;
                va_block_align = hdev->asic_prop.dmmu.page_size;
        }

        /*
         * relevant for mapping device physical memory only, as host memory is
         * implicitly shared
         */
        if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
                        phys_pg_pack->asid != ctx->asid) {
                dev_err(hdev->dev,
                        "Failed to map memory, handle %u is not shared\n",
                        handle);
                rc = -EPERM;
                goto shared_err;
        }

        hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
        if (!hnode) {
                rc = -ENOMEM;
                goto hnode_err;
        }

        ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
                        hint_addr, va_block_align);
        if (!ret_vaddr) {
                dev_err(hdev->dev, "no available va block for handle %u\n",
                                handle);
                rc = -ENOMEM;
                goto va_block_err;
        }

        mutex_lock(&ctx->mmu_lock);

        rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
        if (rc) {
                mutex_unlock(&ctx->mmu_lock);
                dev_err(hdev->dev, "mapping page pack failed for handle %u\n",
                                handle);
                goto map_err;
        }

        rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, false, *vm_type);

        mutex_unlock(&ctx->mmu_lock);

        if (rc) {
                dev_err(hdev->dev,
                        "mapping handle %u failed due to MMU cache invalidation\n",
                        handle);
                goto map_err;
        }

        ret_vaddr += phys_pg_pack->offset;

        hnode->ptr = vm_type;
        hnode->vaddr = ret_vaddr;

        mutex_lock(&ctx->mem_hash_lock);
        hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
        mutex_unlock(&ctx->mem_hash_lock);

        *device_addr = ret_vaddr;

        if (is_userptr)
                free_phys_pg_pack(hdev, phys_pg_pack);

        return 0;

map_err:
        if (add_va_block(hdev, va_range, ret_vaddr,
                        ret_vaddr + phys_pg_pack->total_size - 1))
                dev_warn(hdev->dev,
                        "release va block failed for handle 0x%x, vaddr: 0x%llx\n",
                        handle, ret_vaddr);

va_block_err:
        kfree(hnode);
hnode_err:
shared_err:
        atomic_dec(&phys_pg_pack->mapping_cnt);
        if (is_userptr)
                free_phys_pg_pack(hdev, phys_pg_pack);
init_page_pack_err:
        if (is_userptr)
                dma_unmap_host_va(hdev, userptr);

        return rc;
}

/*
 * unmap_device_va - unmap the given device virtual address
 *
 * @ctx: current context
 * @vaddr: device virtual address to unmap
 * @ctx_free: true if in context free flow, false otherwise.
 *
 * This function does the following:
 * - Unmap the physical pages related to the given virtual address
 * - Return the device virtual block to the virtual block list
 */
static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr, bool ctx_free)
{
        struct hl_device *hdev = ctx->hdev;
        struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
        struct hl_vm_hash_node *hnode = NULL;
        struct hl_userptr *userptr = NULL;
        struct hl_va_range *va_range;
        enum vm_type_t *vm_type;
        bool is_userptr;
        int rc = 0;

        /* protect from double entrance */
        mutex_lock(&ctx->mem_hash_lock);
        hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr)
                if (vaddr == hnode->vaddr)
                        break;

        if (!hnode) {
                mutex_unlock(&ctx->mem_hash_lock);
                dev_err(hdev->dev,
                        "unmap failed, no mem hnode for vaddr 0x%llx\n",
                        vaddr);
                return -EINVAL;
        }

        hash_del(&hnode->node);
        mutex_unlock(&ctx->mem_hash_lock);

        vm_type = hnode->ptr;

        if (*vm_type == VM_TYPE_USERPTR) {
                is_userptr = true;
                userptr = hnode->ptr;
                rc = init_phys_pg_pack_from_userptr(ctx, userptr,
                                &phys_pg_pack);
                if (rc) {
                        dev_err(hdev->dev,
                                "unable to init page pack for vaddr 0x%llx\n",
                                vaddr);
                        goto vm_type_err;
                }

                if (phys_pg_pack->page_size ==
                                        hdev->asic_prop.pmmu.page_size)
                        va_range = ctx->host_va_range;
                else
                        va_range = ctx->host_huge_va_range;
        } else if (*vm_type == VM_TYPE_PHYS_PACK) {
                is_userptr = false;
                va_range = ctx->dram_va_range;
                phys_pg_pack = hnode->ptr;
        } else {
                dev_warn(hdev->dev,
                        "unmap failed, unknown vm desc for vaddr 0x%llx\n",
                        vaddr);
                rc = -EFAULT;
                goto vm_type_err;
        }

        if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
                dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
                rc = -EINVAL;
                goto mapping_cnt_err;
        }

        vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);

        mutex_lock(&ctx->mmu_lock);

        unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);

        /*
         * During context free this function is called in a loop to clean all
         * the context mappings. Hence the cache invalidation can be called
         * once at the loop end rather than for each iteration
         */
        if (!ctx_free)
                rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
                                *vm_type);

        mutex_unlock(&ctx->mmu_lock);

        /*
         * If the context is closing we don't need to check for the MMU cache
         * invalidation return code and update the VA free list as in this
         * flow we invalidate the MMU cache outside of this unmap function and
         * the VA free list will be freed anyway.
         */
        if (!ctx_free) {
                int tmp_rc;

                if (rc)
                        dev_err(hdev->dev,
                                "unmapping vaddr 0x%llx failed due to MMU cache invalidation\n",
                                vaddr);

                tmp_rc = add_va_block(hdev, va_range, vaddr,
                                        vaddr + phys_pg_pack->total_size - 1);
                if (tmp_rc) {
                        dev_warn(hdev->dev,
                                        "add va block failed for vaddr: 0x%llx\n",
                                        vaddr);
                        if (!rc)
                                rc = tmp_rc;
                }
        }

        atomic_dec(&phys_pg_pack->mapping_cnt);
        kfree(hnode);

        if (is_userptr) {
                free_phys_pg_pack(hdev, phys_pg_pack);
                dma_unmap_host_va(hdev, userptr);
        }

        return rc;

mapping_cnt_err:
        if (is_userptr)
                free_phys_pg_pack(hdev, phys_pg_pack);
vm_type_err:
        mutex_lock(&ctx->mem_hash_lock);
        hash_add(ctx->mem_hash, &hnode->node, vaddr);
        mutex_unlock(&ctx->mem_hash_lock);

        return rc;
}

static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
{
        struct hl_device *hdev = hpriv->hdev;
        struct hl_ctx *ctx = hpriv->ctx;
        u64 device_addr = 0;
        u32 handle = 0;
        int rc;

        switch (args->in.op) {
        case HL_MEM_OP_ALLOC:
                if (args->in.alloc.mem_size == 0) {
                        dev_err(hdev->dev,
                                "alloc size must be larger than 0\n");
                        rc = -EINVAL;
                        goto out;
                }

                /* Force contiguous as there are no real MMU
                 * translations to overcome physical memory gaps
                 */
                args->in.flags |= HL_MEM_CONTIGUOUS;
                rc = alloc_device_memory(ctx, &args->in, &handle);

                memset(args, 0, sizeof(*args));
                args->out.handle = (__u64) handle;
                break;

        case HL_MEM_OP_FREE:
                rc = free_device_memory(ctx, args->in.free.handle);
                break;

        case HL_MEM_OP_MAP:
                if (args->in.flags & HL_MEM_USERPTR) {
                        device_addr = args->in.map_host.host_virt_addr;
                        rc = 0;
                } else {
                        rc = get_paddr_from_handle(ctx, &args->in,
                                        &device_addr);
                }

                memset(args, 0, sizeof(*args));
                args->out.device_virt_addr = device_addr;
                break;

        case HL_MEM_OP_UNMAP:
                rc = 0;
                break;

        default:
                dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
                rc = -ENOTTY;
                break;
        }

out:
        return rc;
}

int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_mem_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_ctx *ctx = hpriv->ctx;
	u64 device_addr = 0;
	u32 handle = 0;
	int rc;

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute MEMORY IOCTL\n",
			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
		return -EBUSY;
	}

	if (!hdev->mmu_enable)
		return mem_ioctl_no_mmu(hpriv, args);

	switch (args->in.op) {
	case HL_MEM_OP_ALLOC:
		if (!hdev->dram_supports_virtual_memory) {
			dev_err(hdev->dev, "DRAM alloc is not supported\n");
			rc = -EINVAL;
			goto out;
		}

		if (args->in.alloc.mem_size == 0) {
			dev_err(hdev->dev,
				"alloc size must be larger than 0\n");
			rc = -EINVAL;
			goto out;
		}
		rc = alloc_device_memory(ctx, &args->in, &handle);

		memset(args, 0, sizeof(*args));
		args->out.handle = (__u64) handle;
		break;

	case HL_MEM_OP_FREE:
		rc = free_device_memory(ctx, args->in.free.handle);
		break;

	case HL_MEM_OP_MAP:
		rc = map_device_va(ctx, &args->in, &device_addr);

		memset(args, 0, sizeof(*args));
		args->out.device_virt_addr = device_addr;
		break;

	case HL_MEM_OP_UNMAP:
		rc = unmap_device_va(ctx, args->in.unmap.device_virt_addr,
					false);
		break;

	default:
		dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
		rc = -ENOTTY;
		break;
	}

out:
	return rc;
}
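
/*
 * For illustration only: a hedged sketch of how userspace might drive the
 * ALLOC and MAP opcodes handled above through the HL_IOCTL_MEMORY interface.
 * It assumes the uapi header is installed as <misc/habanalabs.h>; the helper
 * name and the simplified error handling are hypothetical, not part of this
 * driver. FREE and UNMAP are invoked analogously with their own union fields.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <misc/habanalabs.h>
 *
 *	static int example_alloc_and_map(int fd, __u64 size, __u64 *dva)
 *	{
 *		union hl_mem_args args;
 *		__u64 handle;
 *
 *		// Allocate device memory; the handle comes back in args.out.
 *		memset(&args, 0, sizeof(args));
 *		args.in.op = HL_MEM_OP_ALLOC;
 *		args.in.alloc.mem_size = size;
 *		if (ioctl(fd, HL_IOCTL_MEMORY, &args))
 *			return -1;
 *		handle = args.out.handle;
 *
 *		// Map the allocation; the device VA comes back in args.out.
 *		memset(&args, 0, sizeof(args));
 *		args.in.op = HL_MEM_OP_MAP;
 *		args.in.map_device.handle = handle;
 *		if (ioctl(fd, HL_IOCTL_MEMORY, &args))
 *			return -1;
 *		*dva = args.out.device_virt_addr;
 *		return 0;
 *	}
 */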

static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
				u32 npages, u64 start, u32 offset,
				struct hl_userptr *userptr)
{
	int rc;

	if (!access_ok((void __user *) (uintptr_t) addr, size)) {
		dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
		return -EFAULT;
	}

	userptr->vec = frame_vector_create(npages);
	if (!userptr->vec) {
		dev_err(hdev->dev, "Failed to create frame vector\n");
		return -ENOMEM;
	}

	rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
				userptr->vec);

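	/*
	 * get_vaddr_frames() returns the number of frames actually pinned,
	 * so a non-negative value smaller than npages means a partial pin:
	 * report -EFAULT and release whatever was pinned. A negative value
	 * means nothing was pinned, so only the vector must be destroyed.
	 */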
	if (rc != npages) {
		dev_err(hdev->dev,
			"Failed to map host memory, user ptr probably wrong\n");
		if (rc < 0)
			goto destroy_framevec;
		rc = -EFAULT;
		goto put_framevec;
	}

	if (frame_vector_to_pages(userptr->vec) < 0) {
		dev_err(hdev->dev,
			"Failed to translate frame vector to pages\n");
		rc = -EFAULT;
		goto put_framevec;
	}

	rc = sg_alloc_table_from_pages(userptr->sgt,
					frame_vector_pages(userptr->vec),
					npages, offset, size, GFP_ATOMIC);
	if (rc < 0) {
		dev_err(hdev->dev, "failed to create SG table from pages\n");
		goto put_framevec;
	}

	return 0;

put_framevec:
	put_vaddr_frames(userptr->vec);
destroy_framevec:
	frame_vector_destroy(userptr->vec);
	return rc;
}

/*
 * hl_pin_host_memory - pins a chunk of host memory.
 * @hdev: pointer to the habanalabs device structure
 * @addr: the host virtual address of the memory area
 * @size: the size of the memory area
 * @userptr: pointer to hl_userptr structure
 *
 * This function does the following:
 * - Pins the physical pages
 * - Creates an SG list from those pages
 */
int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
			struct hl_userptr *userptr)
{
	u64 start, end;
	u32 npages, offset;
	int rc;

	if (!size) {
		dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
		return -EINVAL;
	}

	/*
	 * If the combination of the address and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
			PAGE_ALIGN(addr + size) < (addr + size)) {
		dev_err(hdev->dev,
			"user pointer 0x%llx + %llu causes integer overflow\n",
			addr, size);
		return -EINVAL;
	}
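
	/*
	 * Worked example of the check above: with addr = 0xffffffffffff0000
	 * and size = 0x20000, addr + size wraps to 0x10000 < addr, so the
	 * request is rejected. The second clause catches the corner case
	 * where addr + size itself fits but PAGE_ALIGN()'s round-up wraps.
	 */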

	/*
	 * This function can also be called from the data path, hence always
	 * use GFP_ATOMIC; this is not a big allocation anyway.
	 */
	userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC);
	if (!userptr->sgt)
		return -ENOMEM;

	start = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	end = PAGE_ALIGN(addr + size);
	npages = (end - start) >> PAGE_SHIFT;
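
	/*
	 * E.g. with 4 KiB pages, addr = 0x1003 and size = 0x2000 yield
	 * start = 0x1000, offset = 0x3, end = 0x4000 and npages = 3: the
	 * unaligned head and tail each pull in one extra page.
	 */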

	userptr->size = size;
	userptr->addr = addr;
	userptr->dma_mapped = false;
	INIT_LIST_HEAD(&userptr->job_node);

	rc = get_user_memory(hdev, addr, size, npages, start, offset,
				userptr);
	if (rc) {
		dev_err(hdev->dev,
			"failed to get user memory for address 0x%llx\n",
			addr);
		goto free_sgt;
	}

	hl_debugfs_add_userptr(hdev, userptr);

	return 0;

free_sgt:
	kfree(userptr->sgt);
	return rc;
}
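
/*
 * For illustration only: a hedged in-kernel sketch of the expected
 * pin/unpin pairing. The caller owns the hl_userptr structure; everything
 * here other than hl_pin_host_memory()/hl_unpin_host_memory() is
 * hypothetical.
 *
 *	struct hl_userptr *userptr;
 *	int rc;
 *
 *	userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
 *	if (!userptr)
 *		return -ENOMEM;
 *
 *	rc = hl_pin_host_memory(hdev, user_addr, user_size, userptr);
 *	if (rc) {
 *		kfree(userptr);
 *		return rc;
 *	}
 *
 *	// ... DMA-map userptr->sgt and hand it to the device ...
 *
 *	hl_unpin_host_memory(hdev, userptr);
 *	kfree(userptr);
 */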

/*
 * hl_unpin_host_memory - unpins a chunk of host memory.
 * @hdev: pointer to the habanalabs device structure
 * @userptr: pointer to hl_userptr structure
 *
 * This function does the following:
 * - Unpins the physical pages related to the host memory
 * - Frees the SG list
 */
void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
{
	struct page **pages;

	hl_debugfs_remove_userptr(hdev, userptr);

	if (userptr->dma_mapped)
		hdev->asic_funcs->hl_dma_unmap_sg(hdev, userptr->sgt->sgl,
						userptr->sgt->nents,
						userptr->dir);

	pages = frame_vector_pages(userptr->vec);
	if (!IS_ERR(pages)) {
		int i;

		for (i = 0; i < frame_vector_count(userptr->vec); i++)
			set_page_dirty_lock(pages[i]);
	}
	put_vaddr_frames(userptr->vec);
	frame_vector_destroy(userptr->vec);

	list_del(&userptr->job_node);

	sg_free_table(userptr->sgt);
	kfree(userptr->sgt);
}

/*
 * hl_userptr_delete_list - clear userptr list
 *
 * @hdev: pointer to the habanalabs device structure
 * @userptr_list: pointer to the list to clear
 *
 * This function does the following:
 * - Iterates over the list, unpins the host memory and frees the userptr
 *   structures.
 */
void hl_userptr_delete_list(struct hl_device *hdev,
				struct list_head *userptr_list)
{
	struct hl_userptr *userptr, *tmp;

	list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
		hl_unpin_host_memory(hdev, userptr);
		kfree(userptr);
	}

	INIT_LIST_HEAD(userptr_list);
}

/*
 * hl_userptr_is_pinned - returns whether the given userptr is pinned
 *
 * @hdev: pointer to the habanalabs device structure
 * @addr: user address of the memory area to look for
 * @size: size of the memory area to look for
 * @userptr_list: pointer to the list to search
 * @userptr: pointer to userptr to check
 *
 * This function does the following:
 * - Iterates over the list and checks if the given address/size pair is in
 *   it, which means it is pinned. If so, returns true, otherwise returns
 *   false.
 */
bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
				u32 size, struct list_head *userptr_list,
				struct hl_userptr **userptr)
{
	list_for_each_entry((*userptr), userptr_list, job_node) {
		if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
			return true;
	}

	return false;
}
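
/*
 * For illustration only: a hedged sketch of a typical lookup before
 * re-pinning the same range; the surrounding job structure and its list
 * are hypothetical.
 *
 *	struct hl_userptr *userptr;
 *
 *	if (hl_userptr_is_pinned(hdev, addr, size, &job->userptr_list,
 *					&userptr)) {
 *		// Range is already pinned, reuse userptr->sgt.
 *	} else {
 *		// Not pinned yet, pin it via hl_pin_host_memory().
 *	}
 */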

/*
 * va_range_init - initialize virtual addresses range
 * @hdev: pointer to the habanalabs device structure
 * @va_range: pointer to the range to initialize
 * @start: range start address
 * @end: range end address
 *
 * This function does the following:
 * - Initializes the virtual addresses list of the given range with the given
 *   addresses.
 */
static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
			u64 start, u64 end)
{
	int rc;

	INIT_LIST_HEAD(&va_range->list);

	/* PAGE_SIZE alignment: round start up and end down */

	if (start & (PAGE_SIZE - 1)) {
		start &= PAGE_MASK;
		start += PAGE_SIZE;
	}

	if (end & (PAGE_SIZE - 1))
		end &= PAGE_MASK;
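
	/*
	 * E.g. with 4 KiB pages, start = 0x1234 rounds up to 0x2000 and
	 * end = 0x5fff rounds down to 0x5000, so only whole pages that lie
	 * entirely inside the requested range are handed to the allocator.
	 */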

	if (start >= end) {
		dev_err(hdev->dev, "too small vm range for va list\n");
		return -EFAULT;
	}

	rc = add_va_block(hdev, va_range, start, end);

	if (rc) {
		dev_err(hdev->dev, "Failed to init host va list\n");
		return rc;
	}

	va_range->start_addr = start;
	va_range->end_addr = end;

	return 0;
}

/*
 * va_range_fini() - clear a virtual addresses range
 * @hdev: pointer to the habanalabs structure
 * @va_range: pointer to virtual addresses range
 *
 * This function does the following:
 * - Frees the virtual addresses block list and its lock
 */
static void va_range_fini(struct hl_device *hdev,
				struct hl_va_range *va_range)
{
	mutex_lock(&va_range->lock);
	clear_va_list_locked(hdev, &va_range->list);
	mutex_unlock(&va_range->lock);

	mutex_destroy(&va_range->lock);
	kfree(va_range);
}

/*
 * vm_ctx_init_with_ranges() - initialize virtual memory for context
 * @ctx: pointer to the habanalabs context structure
 * @host_range_start: host virtual addresses range start.
 * @host_range_end: host virtual addresses range end.
 * @host_huge_range_start: host virtual addresses range start for memory
 *                         allocated with huge pages.
 * @host_huge_range_end: host virtual addresses range end for memory allocated
 *                       with huge pages.
 * @dram_range_start: dram virtual addresses range start.
 * @dram_range_end: dram virtual addresses range end.
 *
 * This function initializes the following:
 * - MMU for context
 * - Virtual address to area descriptor hashtable
 * - Virtual block list of available virtual memory
 */
static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
					u64 host_range_start,
					u64 host_range_end,
					u64 host_huge_range_start,
					u64 host_huge_range_end,
					u64 dram_range_start,
					u64 dram_range_end)
{
	struct hl_device *hdev = ctx->hdev;
	int rc;

	ctx->host_va_range = kzalloc(sizeof(*ctx->host_va_range), GFP_KERNEL);
	if (!ctx->host_va_range)
		return -ENOMEM;

	ctx->host_huge_va_range = kzalloc(sizeof(*ctx->host_huge_va_range),
						GFP_KERNEL);
	if (!ctx->host_huge_va_range) {
		rc = -ENOMEM;
		goto host_huge_va_range_err;
	}

	ctx->dram_va_range = kzalloc(sizeof(*ctx->dram_va_range), GFP_KERNEL);
	if (!ctx->dram_va_range) {
		rc = -ENOMEM;
		goto dram_va_range_err;
	}

	rc = hl_mmu_ctx_init(ctx);
	if (rc) {
		dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
		goto mmu_ctx_err;
	}

	mutex_init(&ctx->mem_hash_lock);
	hash_init(ctx->mem_hash);

	mutex_init(&ctx->host_va_range->lock);

	rc = va_range_init(hdev, ctx->host_va_range, host_range_start,
				host_range_end);
	if (rc) {
		dev_err(hdev->dev, "failed to init host vm range\n");
		goto host_page_range_err;
	}

	if (hdev->pmmu_huge_range) {
		mutex_init(&ctx->host_huge_va_range->lock);

		rc = va_range_init(hdev, ctx->host_huge_va_range,
					host_huge_range_start,
					host_huge_range_end);
		if (rc) {
			dev_err(hdev->dev,
				"failed to init host huge vm range\n");
			goto host_hpage_range_err;
		}
	} else {
		kfree(ctx->host_huge_va_range);
		ctx->host_huge_va_range = ctx->host_va_range;
	}

	mutex_init(&ctx->dram_va_range->lock);

	rc = va_range_init(hdev, ctx->dram_va_range, dram_range_start,
				dram_range_end);
	if (rc) {
		dev_err(hdev->dev, "failed to init dram vm range\n");
		goto dram_vm_err;
	}

	hl_debugfs_add_ctx_mem_hash(hdev, ctx);

	return 0;

dram_vm_err:
	mutex_destroy(&ctx->dram_va_range->lock);

	if (hdev->pmmu_huge_range) {
		mutex_lock(&ctx->host_huge_va_range->lock);
		clear_va_list_locked(hdev, &ctx->host_huge_va_range->list);
		mutex_unlock(&ctx->host_huge_va_range->lock);
	}
host_hpage_range_err:
	if (hdev->pmmu_huge_range)
		mutex_destroy(&ctx->host_huge_va_range->lock);
	mutex_lock(&ctx->host_va_range->lock);
	clear_va_list_locked(hdev, &ctx->host_va_range->list);
	mutex_unlock(&ctx->host_va_range->lock);
host_page_range_err:
	mutex_destroy(&ctx->host_va_range->lock);
	mutex_destroy(&ctx->mem_hash_lock);
	hl_mmu_ctx_fini(ctx);
mmu_ctx_err:
	kfree(ctx->dram_va_range);
dram_va_range_err:
	kfree(ctx->host_huge_va_range);
host_huge_va_range_err:
	kfree(ctx->host_va_range);

	return rc;
}

int hl_vm_ctx_init(struct hl_ctx *ctx)
{
	struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
	u64 host_range_start, host_range_end, host_huge_range_start,
		host_huge_range_end, dram_range_start, dram_range_end;

	atomic64_set(&ctx->dram_phys_mem, 0);

	/*
	 * - If MMU is enabled, init the ranges as usual.
	 * - If MMU is disabled, in case of host mapping, the returned address
	 *   is the given one.
	 *   In case of DRAM mapping, the returned address is the physical
	 *   address of the memory related to the given handle.
	 */
	if (ctx->hdev->mmu_enable) {
		dram_range_start = prop->dmmu.start_addr;
		dram_range_end = prop->dmmu.end_addr;
		host_range_start = prop->pmmu.start_addr;
		host_range_end = prop->pmmu.end_addr;
		host_huge_range_start = prop->pmmu_huge.start_addr;
		host_huge_range_end = prop->pmmu_huge.end_addr;
	} else {
		dram_range_start = prop->dram_user_base_address;
		dram_range_end = prop->dram_end_address;
		host_range_start = prop->dram_user_base_address;
		host_range_end = prop->dram_end_address;
		host_huge_range_start = prop->dram_user_base_address;
		host_huge_range_end = prop->dram_end_address;
	}

	return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
					host_huge_range_start,
					host_huge_range_end,
					dram_range_start,
					dram_range_end);
}

/*
 * hl_vm_ctx_fini - virtual memory teardown of context
 *
 * @ctx: pointer to the habanalabs context structure
 *
 * This function performs teardown of the following:
 * - Virtual block list of available virtual memory
 * - Virtual address to area descriptor hashtable
 * - MMU for context
 *
 * In addition this function does the following:
 * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
 *   hashtable should be empty as no valid mappings should exist at this
 *   point.
 * - Frees any existing physical page list from the idr which relates to the
 *   current context asid.
 * - Checks the virtual block list for correctness. At this point the list
 *   should contain one element which describes the whole virtual memory
 *   range of the context. Otherwise, a warning is printed.
 */
void hl_vm_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm *vm = &hdev->vm;
	struct hl_vm_phys_pg_pack *phys_pg_list;
	struct hl_vm_hash_node *hnode;
	struct hlist_node *tmp_node;
	int i;

	hl_debugfs_remove_ctx_mem_hash(hdev, ctx);

	/*
	 * Something clearly went wrong on hard reset, so there is no point in
	 * printing another error about its side effects.
	 */
	if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
		dev_notice(hdev->dev,
			"user released device without removing its memory mappings\n");

	hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
		dev_dbg(hdev->dev,
			"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
			hnode->vaddr, ctx->asid);
		unmap_device_va(ctx, hnode->vaddr, true);
	}

	/* invalidate the cache once after the unmapping loop */
	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);

	spin_lock(&vm->idr_lock);
	idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
		if (phys_pg_list->asid == ctx->asid) {
			dev_dbg(hdev->dev,
				"page list 0x%px of asid %d is still alive\n",
				phys_pg_list, ctx->asid);
			atomic64_sub(phys_pg_list->total_size,
					&hdev->dram_used_mem);
			free_phys_pg_pack(hdev, phys_pg_list);
			idr_remove(&vm->phys_pg_pack_handles, i);
		}
	spin_unlock(&vm->idr_lock);

	va_range_fini(hdev, ctx->dram_va_range);
	if (hdev->pmmu_huge_range)
		va_range_fini(hdev, ctx->host_huge_va_range);
	va_range_fini(hdev, ctx->host_va_range);

	mutex_destroy(&ctx->mem_hash_lock);
	hl_mmu_ctx_fini(ctx);
}

/*
 * hl_vm_init - initialize virtual memory module
 *
 * @hdev: pointer to the habanalabs device structure
 *
 * This function initializes the following:
 * - MMU module
 * - DRAM physical pages pool of 2MB
 * - Idr for device memory allocation handles
 */
int hl_vm_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm *vm = &hdev->vm;
	int rc;

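	/*
	 * __ffs(dram_page_size) is the pool's minimum allocation order; e.g.
	 * a 2 MB DRAM page size gives __ffs = 21, so the pool hands out
	 * memory in 2 MB granules.
	 */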
	vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1);
	if (!vm->dram_pg_pool) {
		dev_err(hdev->dev, "Failed to create dram page pool\n");
		return -ENOMEM;
	}

	kref_init(&vm->dram_pg_pool_refcount);

	rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
			prop->dram_end_address - prop->dram_user_base_address,
			-1);

	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to dram page pool %d\n", rc);
		goto pool_add_err;
	}

	spin_lock_init(&vm->idr_lock);
	idr_init(&vm->phys_pg_pack_handles);

	atomic64_set(&hdev->dram_used_mem, 0);

	vm->init_done = true;

	return 0;

pool_add_err:
	gen_pool_destroy(vm->dram_pg_pool);

	return rc;
}

/*
 * hl_vm_fini - virtual memory module teardown
 *
 * @hdev: pointer to the habanalabs device structure
 *
 * This function performs teardown of the following:
 * - Idr for device memory allocation handles
 * - DRAM physical pages pool of 2MB
 * - MMU module
 */
void hl_vm_fini(struct hl_device *hdev)
{
	struct hl_vm *vm = &hdev->vm;

	if (!vm->init_done)
		return;

	/*
	 * At this point all the contexts should be freed and hence no DRAM
	 * memory should be in use. Hence the DRAM pool should be freed here.
	 * kref_put() returns 1 only when the refcount dropped to zero and the
	 * release callback ran, so any other return value means a reference
	 * (and therefore some DRAM allocation) is still outstanding.
	 */
	if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
		dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
				__func__);

	vm->init_done = false;
}