// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

void rxe_mmap_release(struct kref *ref)
{
	struct rxe_mmap_info *ip = container_of(ref,
					struct rxe_mmap_info, ref);
	struct rxe_dev *rxe = to_rdev(ip->context->device);

	spin_lock_bh(&rxe->pending_lock);

	if (!list_empty(&ip->pending_mmaps))
		list_del(&ip->pending_mmaps);

	spin_unlock_bh(&rxe->pending_lock);
	vfree(ip->obj);		/* the vmalloc()ed object buffer */
	kfree(ip);
}

/*
 * open and close keep track of how many times the memory region is
 * mapped, so that it is not released while a mapping still exists.
 */
static void rxe_vma_open(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_get(&ip->ref);
}

static void rxe_vma_close(struct vm_area_struct *vma)
{
	struct rxe_mmap_info *ip = vma->vm_private_data;

	kref_put(&ip->ref, rxe_mmap_release);
}

static const struct vm_operations_struct rxe_vm_ops = {
	.open = rxe_vma_open,
	.close = rxe_vma_close,
};
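
/*
 * Lifecycle sketch (inferred from the vm_operations above, not a
 * verbatim trace): the initial mmap() takes a reference through
 * rxe_vma_open() in rxe_mmap() below; fork() duplicates the VMA and
 * opens it again; every munmap() or process exit drops one reference
 * via rxe_vma_close(). rxe_mmap_release() frees the buffer only once
 * the last reference, including the creator's from kref_init(), goes:
 *
 *	mmap()   -> rxe_vma_open()  -> kref_get(&ip->ref)
 *	fork()   -> rxe_vma_open()  -> kref_get(&ip->ref)
 *	munmap() -> rxe_vma_close() -> kref_put(&ip->ref, rxe_mmap_release)
 */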

/**
 * rxe_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 *
 * Return: zero if the mmap is OK. Otherwise, return an errno.
 */
int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rxe_dev *rxe = to_rdev(context->device);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct rxe_mmap_info *ip, *pp;
	int ret;

	/*
	 * Search the device's list of objects waiting for a mmap call.
	 * Normally, this list is very short since a call to create a
	 * CQ, QP, or SRQ is soon followed by a call to mmap().
	 */
	spin_lock_bh(&rxe->pending_lock);
	list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) {
		if (context != ip->context || (__u64)offset != ip->info.offset)
			continue;

		/* Don't allow a mmap larger than the object. */
		if (size > ip->info.size) {
			pr_err("mmap region is larger than the object!\n");
			spin_unlock_bh(&rxe->pending_lock);
			ret = -EINVAL;
			goto done;
		}

		goto found_it;
	}
	pr_warn("unable to find pending mmap info\n");
	spin_unlock_bh(&rxe->pending_lock);
	ret = -EINVAL;
	goto done;

found_it:
	list_del_init(&ip->pending_mmaps);
	spin_unlock_bh(&rxe->pending_lock);

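	/*
	 * Hand the whole vmalloc()ed object to userspace; the third
	 * argument of remap_vmalloc_range() is the page offset inside
	 * the object, which is always 0 here.
	 */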
	ret = remap_vmalloc_range(vma, ip->obj, 0);
	if (ret) {
		pr_err("err %d from remap_vmalloc_range\n", ret);
		goto done;
	}

	vma->vm_ops = &rxe_vm_ops;
	vma->vm_private_data = ip;
	rxe_vma_open(vma);
done:
	return ret;
}
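
/*
 * A minimal userspace sketch (names such as resp.mi and cmd_fd are
 * illustrative assumptions, not part of this file): the create verb
 * returns ip->info (offset and size) in its response, and the provider
 * library then maps the queue using that pseudo-offset:
 *
 *	buf = mmap(NULL, resp.mi.size, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, context->cmd_fd, resp.mi.offset);
 *	if (buf == MAP_FAILED)
 *		return errno;
 */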

/*
 * Allocate information for rxe_mmap
 */
struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
					   struct ib_udata *udata, void *obj)
{
	struct rxe_mmap_info *ip;

	if (!udata)
		return ERR_PTR(-EINVAL);

	ip = kmalloc(sizeof(*ip), GFP_KERNEL);
	if (!ip)
		return ERR_PTR(-ENOMEM);

	size = PAGE_ALIGN(size);

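	/*
	 * Carve out a unique, SHMLBA-aligned pseudo-offset for this
	 * object. The offset is only a lookup key for rxe_mmap() and is
	 * never used as a real offset into the buffer; starting at
	 * PAGE_SIZE keeps it nonzero, and SHMLBA alignment avoids
	 * cache-aliasing issues on virtually indexed caches.
	 */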
	spin_lock_bh(&rxe->mmap_offset_lock);

	if (rxe->mmap_offset == 0)
		rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);

	ip->info.offset = rxe->mmap_offset;
	rxe->mmap_offset += ALIGN(size, SHMLBA);

	spin_unlock_bh(&rxe->mmap_offset_lock);

	INIT_LIST_HEAD(&ip->pending_mmaps);
	ip->info.size = size;
	ip->context = container_of(udata, struct uverbs_attr_bundle,
				   driver_udata)->context;
	ip->obj = obj;
	kref_init(&ip->ref);

	return ip;
}
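
/*
 * A minimal sketch of the expected caller (modeled on the rxe queue
 * setup path; not a definitive recipe): allocate the buffer with
 * vmalloc_user() so remap_vmalloc_range() accepts it, create the mmap
 * info, report ip->info back through udata, and only then queue ip on
 * rxe->pending_mmaps for rxe_mmap() to find:
 *
 *	buf = vmalloc_user(buf_size);
 *	ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
 *	if (IS_ERR(ip))
 *		return PTR_ERR(ip);
 *	// ... copy ip->info into the uverbs response via udata ...
 *	spin_lock_bh(&rxe->pending_lock);
 *	list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
 *	spin_unlock_bh(&rxe->pending_lock);
 */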