// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/genalloc.h>

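/**
 * cb_map_mem() - map a CB's DMA pages into the device MMU.
 * @ctx: context that owns the CB VA pool.
 * @cb: command buffer to map.
 *
 * Allocates device virtual addresses from the context's CB VA pool, one
 * PMMU page at a time, and maps the CB's bus address range behind them.
 * On failure, already-mapped pages are unmapped and all VA blocks are
 * returned to the pool.
 */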
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm_va_block *va_block, *tmp;
	dma_addr_t bus_addr;
	u64 virt_addr;
	u32 page_size = prop->pmmu.page_size;
	s32 offset;
	int rc;

	if (!hdev->supports_cb_mapping) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because no VA range is allocated for CB mapping\n");
		return -EINVAL;
	}

	if (!hdev->mmu_enable) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because MMU is disabled\n");
		return -EINVAL;
	}

	INIT_LIST_HEAD(&cb->va_block_list);

	for (bus_addr = cb->bus_address;
			bus_addr < cb->bus_address + cb->size;
			bus_addr += page_size) {

		virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
		if (!virt_addr) {
			dev_err(hdev->dev,
				"Failed to allocate device virtual address for CB\n");
			rc = -ENOMEM;
			goto err_va_pool_free;
		}

		va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
		if (!va_block) {
			rc = -ENOMEM;
			gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
			goto err_va_pool_free;
		}

		va_block->start = virt_addr;
		va_block->end = virt_addr + page_size;
		va_block->size = page_size;
		list_add_tail(&va_block->node, &cb->va_block_list);
	}

	mutex_lock(&ctx->mmu_lock);

	bus_addr = cb->bus_address;
	offset = 0;
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		rc = hl_mmu_map(ctx, va_block->start, bus_addr, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list));
		if (rc) {
			dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
				va_block->start);
			goto err_va_umap;
		}

		bus_addr += va_block->size;
		offset += va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	cb->is_mmu_mapped = true;

	return 0;

err_va_umap:
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		if (offset <= 0)
			break;
		hl_mmu_unmap(ctx, va_block->start, va_block->size,
				offset <= va_block->size);
		offset -= va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

err_va_pool_free:
	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}

	return rc;
}

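/**
 * cb_unmap_mem() - tear down the MMU mappings created by cb_map_mem().
 * @ctx: context that owns the CB VA pool.
 * @cb: command buffer to unmap.
 */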
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_va_block *va_block, *tmp;

	mutex_lock(&ctx->mmu_lock);

	list_for_each_entry(va_block, &cb->va_block_list, node)
		if (hl_mmu_unmap(ctx, va_block->start, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list)))
			dev_warn_ratelimited(hdev->dev,
					"Failed to unmap CB's va 0x%llx\n",
					va_block->start);

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}
}

static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
		gen_pool_free(hdev->internal_cb_pool,
				(uintptr_t)cb->kernel_address, cb->size);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
				cb->kernel_address, cb->bus_address);

	kfree(cb);
}

static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		spin_lock(&hdev->cb_pool_lock);
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}

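/**
 * cb_release() - kref release callback for a CB.
 * @ref: embedded refcount of the CB whose last reference was dropped.
 *
 * Removes the CB from debugfs, unmaps it from the device MMU if needed,
 * drops the context reference, then frees the buffer or returns it to
 * the device's CB pool.
 */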
static void cb_release(struct kref *ref)
{
	struct hl_device *hdev;
	struct hl_cb *cb;

	cb = container_of(ref, struct hl_cb, refcount);
	hdev = cb->hdev;

	hl_debugfs_remove_cb(cb);

	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);

	cb_do_release(hdev, cb);
}

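/**
 * hl_cb_alloc() - allocate the backing memory for a new CB.
 * @hdev: habanalabs device structure.
 * @cb_size: requested buffer size in bytes.
 * @ctx_id: ASID of the requesting context (HL_KERNEL_ASID_ID for kernel).
 * @internal_cb: true to carve the CB out of the internal CB pool instead
 *               of allocating DMA-coherent memory.
 */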
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
					int ctx_id, bool internal_cb)
{
	struct hl_cb *cb;
	u32 cb_offset;
	void *p;

	/*
	 * We use GFP_ATOMIC here because this function can be called from
	 * the latency-sensitive code path for command submission. Due to H/W
	 * limitations in some of the ASICs, the kernel must copy the user CB
	 * that is designated for an external queue and actually enqueue
	 * the kernel's copy. Hence, we must never sleep in this code section
	 * and must use GFP_ATOMIC for all memory allocations.
	 */
	if (ctx_id == HL_KERNEL_ASID_ID)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
	else
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return NULL;

	if (internal_cb) {
		p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
		if (!p) {
			kfree(cb);
			return NULL;
		}

		cb_offset = p - hdev->internal_cb_pool_virt_addr;
		cb->is_internal = true;
		cb->bus_address = hdev->internal_cb_va_base + cb_offset;
	} else if (ctx_id == HL_KERNEL_ASID_ID) {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address, GFP_ATOMIC);
	} else {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address,
						GFP_USER | __GFP_ZERO);
	}

	if (!p) {
		dev_err(hdev->dev,
			"failed to allocate %d bytes of DMA memory for CB\n",
			cb_size);
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = p;
	cb->size = cb_size;

	return cb;
}

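/**
 * hl_cb_create() - create a new command buffer and return a handle to it.
 * @hdev: habanalabs device structure.
 * @mgr: CB manager whose IDR will track the new CB.
 * @ctx: context the CB belongs to.
 * @cb_size: requested size in bytes (rounded up to at least PAGE_SIZE for
 *           non-internal CBs, capped at 2MB).
 * @internal_cb: allocate from the internal CB pool.
 * @map_cb: also map the CB into the device MMU (user contexts only).
 * @handle: returned opaque handle, pre-shifted by PAGE_SHIFT so it can be
 *          used directly as an mmap offset.
 *
 * Return: 0 on success, negative errno otherwise.
 */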
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
			bool map_cb, u64 *handle)
{
	struct hl_cb *cb;
	bool alloc_new_cb = true;
	int rc, ctx_id = ctx->asid;

	/*
	 * Can't use the generic function to check this because of a special
	 * case where we create a CB as part of the reset process
	 */
	if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
					(ctx_id != HL_KERNEL_ASID_ID))) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't create new CBs\n");
		rc = -EBUSY;
		goto out_err;
	}

	if (cb_size > SZ_2M) {
		dev_err(hdev->dev, "CB size %d must not exceed %d\n",
			cb_size, SZ_2M);
		rc = -EINVAL;
		goto out_err;
	}

	if (!internal_cb) {
		/* Minimum allocation must be PAGE_SIZE */
		if (cb_size < PAGE_SIZE)
			cb_size = PAGE_SIZE;

		if (ctx_id == HL_KERNEL_ASID_ID &&
				cb_size <= hdev->asic_prop.cb_pool_cb_size) {

			spin_lock(&hdev->cb_pool_lock);
			if (!list_empty(&hdev->cb_pool)) {
				cb = list_first_entry(&hdev->cb_pool,
						typeof(*cb), pool_list);
				list_del(&cb->pool_list);
				spin_unlock(&hdev->cb_pool_lock);
				alloc_new_cb = false;
			} else {
				spin_unlock(&hdev->cb_pool_lock);
				dev_dbg(hdev->dev, "CB pool is empty\n");
			}
		}
	}

	if (alloc_new_cb) {
		cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
		if (!cb) {
			rc = -ENOMEM;
			goto out_err;
		}
	}

	cb->hdev = hdev;
	cb->ctx = ctx;
	hl_ctx_get(hdev, cb->ctx);

	if (map_cb) {
		if (ctx_id == HL_KERNEL_ASID_ID) {
			dev_err(hdev->dev,
				"CB mapping is not supported for kernel context\n");
			rc = -EINVAL;
			goto release_cb;
		}

		rc = cb_map_mem(ctx, cb);
		if (rc)
			goto release_cb;
	}

	spin_lock(&mgr->cb_lock);
	rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
	spin_unlock(&mgr->cb_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
		goto unmap_mem;
	}

	cb->id = (u64) rc;

	kref_init(&cb->refcount);
	spin_lock_init(&cb->lock);

	/*
	 * The IDR value is 32-bit, so we can safely OR it with a mask that
	 * lies above bit 32
	 */
	*handle = cb->id | HL_MMAP_TYPE_CB;
	*handle <<= PAGE_SHIFT;

	hl_debugfs_add_cb(cb);

	return 0;

unmap_mem:
	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);
release_cb:
	hl_ctx_put(cb->ctx);
	cb_do_release(hdev, cb);
out_err:
	*handle = 0;

	return rc;
}

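/**
 * hl_cb_destroy() - drop the handle's reference to a CB.
 * @hdev: habanalabs device structure.
 * @mgr: CB manager that owns the handle.
 * @cb_handle: opaque handle as returned by hl_cb_create(), i.e. already
 *             shifted left by PAGE_SHIFT.
 *
 * Removes the handle from the IDR and puts the reference the IDR held.
 * The CB itself is released only when its last reference (e.g. a live
 * mmap) goes away.
 */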
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
{
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/*
	 * The handle was given to the user for mmap, so shift it back to
	 * the raw value the IDR module allocated
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (cb) {
		idr_remove(&mgr->cb_handles, handle);
		spin_unlock(&mgr->cb_lock);
		kref_put(&cb->refcount, cb_release);
	} else {
		spin_unlock(&mgr->cb_lock);
		dev_err(hdev->dev,
			"CB destroy failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
	}

	return rc;
}

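/**
 * hl_cb_ioctl() - IOCTL entry point for CB create/destroy requests.
 * @hpriv: per-file private data of the caller.
 * @data: union hl_cb_args carrying the request input and output.
 *
 * A minimal userspace sketch, assuming the HL_IOCTL_CB request code from
 * uapi/misc/habanalabs.h:
 *
 *	union hl_cb_args args = {};
 *
 *	args.in.op = HL_CB_OP_CREATE;
 *	args.in.cb_size = 0x1000;
 *	if (!ioctl(fd, HL_IOCTL_CB, &args))
 *		cb_handle = args.out.cb_handle; // later used as mmap offset
 */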
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_cb_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	u64 handle = 0;
	int rc;

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute CB IOCTL\n",
			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
		return -EBUSY;
	}

	switch (args->in.op) {
	case HL_CB_OP_CREATE:
		if (args->in.cb_size > HL_MAX_CB_SIZE) {
			dev_err(hdev->dev,
				"User requested CB size %d must not exceed %d\n",
				args->in.cb_size, HL_MAX_CB_SIZE);
			rc = -EINVAL;
		} else {
			rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
					args->in.cb_size, false,
					!!(args->in.flags & HL_CB_FLAGS_MAP),
					&handle);
		}

		memset(args, 0, sizeof(*args));
		args->out.cb_handle = handle;
		break;

	case HL_CB_OP_DESTROY:
		rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
					args->in.cb_handle);
		break;

	default:
		rc = -ENOTTY;
		break;
	}

	return rc;
}

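/**
 * cb_vm_close() - VMA close callback for a user-mapped CB.
 * @vma: the VMA being torn down.
 *
 * Partial munmap() calls only shrink the tracked mmap size; once the
 * whole mapping is gone, the CB is marked as not mmapped and the mmap
 * reference is dropped.
 */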
static void cb_vm_close(struct vm_area_struct *vma)
{
	struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
	long new_mmap_size;

	new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);

	if (new_mmap_size > 0) {
		cb->mmap_size = new_mmap_size;
		return;
	}

	spin_lock(&cb->lock);
	cb->mmap = false;
	spin_unlock(&cb->lock);

	hl_cb_put(cb);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct cb_vm_ops = {
	.close = cb_vm_close
};

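/**
 * hl_cb_mmap() - map a CB into a user process' address space.
 * @hpriv: per-file private data of the caller.
 * @vma: VMA to populate; vm_pgoff carries the CB handle on entry.
 *
 * Validates that the requested size matches the page-aligned CB size and
 * that the CB is not already mmapped, then delegates the actual mapping
 * to the ASIC-specific cb_mmap callback. The reference taken by
 * hl_cb_get() is handed over to vma->vm_private_data and released in
 * cb_vm_close().
 */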
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cb *cb;
	u32 handle, user_cb_size;
	int rc;

	/* We use the page offset to hold the idr and thus we need to clear
	 * it before doing the mmap itself
	 */
	handle = vma->vm_pgoff;
	vma->vm_pgoff = 0;

	/* reference was taken here */
	cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB mmap failed, no match to handle 0x%x\n", handle);
		return -EINVAL;
	}

	/* Validation check */
	user_cb_size = vma->vm_end - vma->vm_start;
	if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
		dev_err(hdev->dev,
			"CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
			vma->vm_end - vma->vm_start, cb->size);
		rc = -EINVAL;
		goto put_cb;
	}

	if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
							user_cb_size)) {
		dev_err(hdev->dev,
			"user pointer is invalid - 0x%lx\n",
			vma->vm_start);

		rc = -EINVAL;
		goto put_cb;
	}

	spin_lock(&cb->lock);

	if (cb->mmap) {
		dev_err(hdev->dev,
			"CB mmap failed, CB is already mmapped to user\n");
		rc = -EINVAL;
		goto release_lock;
	}

	cb->mmap = true;

	spin_unlock(&cb->lock);

	vma->vm_ops = &cb_vm_ops;

	/*
	 * Note: We're transferring the cb reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = cb;

	rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
					cb->bus_address, cb->size);
	if (rc) {
		spin_lock(&cb->lock);
		cb->mmap = false;
		goto release_lock;
	}

	cb->mmap_size = cb->size;

	return 0;

release_lock:
	spin_unlock(&cb->lock);
put_cb:
	hl_cb_put(cb);
	return rc;
}

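/**
 * hl_cb_get() - look up a CB by handle and take a reference on it.
 * @hdev: habanalabs device structure.
 * @mgr: CB manager to search.
 * @handle: IDR handle of the CB (without the PAGE_SHIFT encoding).
 *
 * Return: pointer to the CB with its refcount incremented, or NULL if no
 * CB matches the handle. Callers must balance with hl_cb_put().
 */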
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u32 handle)
{
	struct hl_cb *cb;

	spin_lock(&mgr->cb_lock);
	cb = idr_find(&mgr->cb_handles, handle);

	if (!cb) {
		spin_unlock(&mgr->cb_lock);
		dev_warn(hdev->dev,
			"CB get failed, no match to handle 0x%x\n", handle);
		return NULL;
	}

	kref_get(&cb->refcount);

	spin_unlock(&mgr->cb_lock);

	return cb;
}

void hl_cb_put(struct hl_cb *cb)
{
	kref_put(&cb->refcount, cb_release);
}

void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
{
	spin_lock_init(&mgr->cb_lock);
	idr_init(&mgr->cb_handles);
}

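/**
 * hl_cb_mgr_fini() - release all CBs still tracked by a CB manager.
 * @hdev: habanalabs device structure.
 * @mgr: CB manager to drain and destroy.
 *
 * Puts the IDR's reference on every remaining CB, complains about any CB
 * that is still referenced elsewhere, then destroys the IDR itself.
 */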
void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
{
	struct hl_cb *cb;
	struct idr *idp;
	u32 id;

	idp = &mgr->cb_handles;

	idr_for_each_entry(idp, cb, id) {
		if (kref_put(&cb->refcount, cb_release) != 1)
			dev_err(hdev->dev,
				"CB %d for CTX ID %d is still alive\n",
				id, cb->ctx->asid);
	}

	idr_destroy(&mgr->cb_handles);
}

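/**
 * hl_cb_kernel_create() - convenience wrapper to create a kernel-owned CB.
 * @hdev: habanalabs device structure.
 * @cb_size: requested size in bytes.
 * @internal_cb: allocate from the internal CB pool.
 *
 * Return: pointer to the new CB with a reference held, or NULL on failure.
 */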
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
					bool internal_cb)
{
	u64 cb_handle;
	struct hl_cb *cb;
	int rc;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
				internal_cb, false, &cb_handle);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate CB for the kernel driver %d\n", rc);
		return NULL;
	}

	cb_handle >>= PAGE_SHIFT;
	cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
	/* hl_cb_get should never fail here so use kernel WARN */
	WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle);
	if (!cb)
		goto destroy_cb;

	return cb;

destroy_cb:
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);

	return NULL;
}

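/**
 * hl_cb_pool_init() - pre-allocate the device's pool of recyclable CBs.
 * @hdev: habanalabs device structure.
 *
 * Allocates cb_pool_cb_cnt CBs of cb_pool_cb_size bytes each. On failure,
 * the partially filled pool is torn down and -ENOMEM is returned.
 */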
int hl_cb_pool_init(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	INIT_LIST_HEAD(&hdev->cb_pool);
	spin_lock_init(&hdev->cb_pool_lock);

	for (i = 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) {
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID, false);
		if (cb) {
			cb->is_pool = true;
			list_add(&cb->pool_list, &hdev->cb_pool);
		} else {
			hl_cb_pool_fini(hdev);
			return -ENOMEM;
		}
	}

	return 0;
}

int hl_cb_pool_fini(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}

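/**
 * hl_cb_va_pool_init() - create the per-context VA pool for CB mapping.
 * @ctx: context to initialize.
 *
 * A no-op on ASICs that don't support CB mapping. Otherwise, creates a
 * gen_pool whose minimum allocation granularity is the PMMU page size and
 * seeds it with the [cb_va_start_addr, cb_va_end_addr) device VA range.
 */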
int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!hdev->supports_cb_mapping)
		return 0;

	ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
	if (!ctx->cb_va_pool) {
		dev_err(hdev->dev,
			"Failed to create VA gen pool for CB mapping\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
			prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to VA gen pool for CB mapping\n");
		goto err_pool_destroy;
	}

	return 0;

err_pool_destroy:
	gen_pool_destroy(ctx->cb_va_pool);

	return rc;
}

void hl_cb_va_pool_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->supports_cb_mapping)
		return;

	gen_pool_destroy(ctx->cb_va_pool);
}