1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Copyright(c) 2016 Intel Corporation.
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * This file is provided under a dual BSD/GPLv2 license. When using or
5*4882a593Smuzhiyun * redistributing this file, you may do so under either license.
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * GPL LICENSE SUMMARY
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun * This program is free software; you can redistribute it and/or modify
10*4882a593Smuzhiyun * it under the terms of version 2 of the GNU General Public License as
11*4882a593Smuzhiyun * published by the Free Software Foundation.
12*4882a593Smuzhiyun *
13*4882a593Smuzhiyun * This program is distributed in the hope that it will be useful, but
14*4882a593Smuzhiyun * WITHOUT ANY WARRANTY; without even the implied warranty of
15*4882a593Smuzhiyun * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16*4882a593Smuzhiyun * General Public License for more details.
17*4882a593Smuzhiyun *
18*4882a593Smuzhiyun * BSD LICENSE
19*4882a593Smuzhiyun *
20*4882a593Smuzhiyun * Redistribution and use in source and binary forms, with or without
21*4882a593Smuzhiyun * modification, are permitted provided that the following conditions
22*4882a593Smuzhiyun * are met:
23*4882a593Smuzhiyun *
24*4882a593Smuzhiyun * - Redistributions of source code must retain the above copyright
25*4882a593Smuzhiyun * notice, this list of conditions and the following disclaimer.
26*4882a593Smuzhiyun * - Redistributions in binary form must reproduce the above copyright
27*4882a593Smuzhiyun * notice, this list of conditions and the following disclaimer in
28*4882a593Smuzhiyun * the documentation and/or other materials provided with the
29*4882a593Smuzhiyun * distribution.
30*4882a593Smuzhiyun * - Neither the name of Intel Corporation nor the names of its
31*4882a593Smuzhiyun * contributors may be used to endorse or promote products derived
32*4882a593Smuzhiyun * from this software without specific prior written permission.
33*4882a593Smuzhiyun *
34*4882a593Smuzhiyun * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35*4882a593Smuzhiyun * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36*4882a593Smuzhiyun * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37*4882a593Smuzhiyun * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38*4882a593Smuzhiyun * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39*4882a593Smuzhiyun * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40*4882a593Smuzhiyun * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41*4882a593Smuzhiyun * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42*4882a593Smuzhiyun * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43*4882a593Smuzhiyun * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44*4882a593Smuzhiyun * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45*4882a593Smuzhiyun *
46*4882a593Smuzhiyun */
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun #include <linux/slab.h>
49*4882a593Smuzhiyun #include <linux/vmalloc.h>
50*4882a593Smuzhiyun #include <linux/mm.h>
51*4882a593Smuzhiyun #include <rdma/uverbs_ioctl.h>
52*4882a593Smuzhiyun #include "mmap.h"
53*4882a593Smuzhiyun
54*4882a593Smuzhiyun /**
55*4882a593Smuzhiyun * rvt_mmap_init - init link list and lock for mem map
56*4882a593Smuzhiyun * @rdi: rvt dev struct
57*4882a593Smuzhiyun */
rvt_mmap_init(struct rvt_dev_info * rdi)58*4882a593Smuzhiyun void rvt_mmap_init(struct rvt_dev_info *rdi)
59*4882a593Smuzhiyun {
60*4882a593Smuzhiyun INIT_LIST_HEAD(&rdi->pending_mmaps);
61*4882a593Smuzhiyun spin_lock_init(&rdi->pending_lock);
62*4882a593Smuzhiyun rdi->mmap_offset = PAGE_SIZE;
63*4882a593Smuzhiyun spin_lock_init(&rdi->mmap_offset_lock);
64*4882a593Smuzhiyun }
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun /**
67*4882a593Smuzhiyun * rvt_release_mmap_info - free mmap info structure
68*4882a593Smuzhiyun * @ref: a pointer to the kref within struct rvt_mmap_info
69*4882a593Smuzhiyun */
rvt_release_mmap_info(struct kref * ref)70*4882a593Smuzhiyun void rvt_release_mmap_info(struct kref *ref)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun struct rvt_mmap_info *ip =
73*4882a593Smuzhiyun container_of(ref, struct rvt_mmap_info, ref);
74*4882a593Smuzhiyun struct rvt_dev_info *rdi = ib_to_rvt(ip->context->device);
75*4882a593Smuzhiyun
76*4882a593Smuzhiyun spin_lock_irq(&rdi->pending_lock);
77*4882a593Smuzhiyun list_del(&ip->pending_mmaps);
78*4882a593Smuzhiyun spin_unlock_irq(&rdi->pending_lock);
79*4882a593Smuzhiyun
80*4882a593Smuzhiyun vfree(ip->obj);
81*4882a593Smuzhiyun kfree(ip);
82*4882a593Smuzhiyun }
83*4882a593Smuzhiyun
rvt_vma_open(struct vm_area_struct * vma)84*4882a593Smuzhiyun static void rvt_vma_open(struct vm_area_struct *vma)
85*4882a593Smuzhiyun {
86*4882a593Smuzhiyun struct rvt_mmap_info *ip = vma->vm_private_data;
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun kref_get(&ip->ref);
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun
rvt_vma_close(struct vm_area_struct * vma)91*4882a593Smuzhiyun static void rvt_vma_close(struct vm_area_struct *vma)
92*4882a593Smuzhiyun {
93*4882a593Smuzhiyun struct rvt_mmap_info *ip = vma->vm_private_data;
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun kref_put(&ip->ref, rvt_release_mmap_info);
96*4882a593Smuzhiyun }
97*4882a593Smuzhiyun
98*4882a593Smuzhiyun static const struct vm_operations_struct rvt_vm_ops = {
99*4882a593Smuzhiyun .open = rvt_vma_open,
100*4882a593Smuzhiyun .close = rvt_vma_close,
101*4882a593Smuzhiyun };
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun /**
104*4882a593Smuzhiyun * rvt_mmap - create a new mmap region
105*4882a593Smuzhiyun * @context: the IB user context of the process making the mmap() call
106*4882a593Smuzhiyun * @vma: the VMA to be initialized
107*4882a593Smuzhiyun *
108*4882a593Smuzhiyun * Return: zero if the mmap is OK. Otherwise, return an errno.
109*4882a593Smuzhiyun */
/**
 * rvt_mmap - create a new mmap region
 * @context: the IB user context of the process making the mmap() call
 * @vma: the VMA to be initialized
 *
 * Matches the VMA's pgoff against an object previously queued on the
 * device's pending-mmap list and, on a match, maps that vmalloc'ed
 * object into the caller's address space.
 *
 * Return: zero if the mmap is OK. Otherwise, return an errno.
 */
int rvt_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rvt_dev_info *rdi = ib_to_rvt(context->device);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct rvt_mmap_info *ip, *pp;
	int ret = -EINVAL;	/* no match found, or mmap too large */

	/*
	 * Search the device's list of objects waiting for a mmap call.
	 * Normally, this list is very short since a call to create a
	 * CQ, QP, or SRQ is soon followed by a call to mmap().
	 */
	spin_lock_irq(&rdi->pending_lock);
	list_for_each_entry_safe(ip, pp, &rdi->pending_mmaps,
				 pending_mmaps) {
		/* Only the creator is allowed to mmap the object */
		if (context != ip->context || (__u64)offset != ip->offset)
			continue;
		/* Don't allow a mmap larger than the object. */
		if (size > ip->size)
			break;

		/* Claim the entry; _safe iteration allows deletion here. */
		list_del_init(&ip->pending_mmaps);
		/* Drop the lock: remap_vmalloc_range() may sleep. */
		spin_unlock_irq(&rdi->pending_lock);

		ret = remap_vmalloc_range(vma, ip->obj, 0);
		if (ret)
			goto done;
		/* Install ops and take a ref so ip outlives the mapping. */
		vma->vm_ops = &rvt_vm_ops;
		vma->vm_private_data = ip;
		rvt_vma_open(vma);
		goto done;
	}
	/* Lock is still held only when the loop exits without a match. */
	spin_unlock_irq(&rdi->pending_lock);
done:
	return ret;
}
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun /**
150*4882a593Smuzhiyun * rvt_create_mmap_info - allocate information for hfi1_mmap
151*4882a593Smuzhiyun * @rdi: rvt dev struct
152*4882a593Smuzhiyun * @size: size in bytes to map
153*4882a593Smuzhiyun * @udata: user data (must be valid!)
154*4882a593Smuzhiyun * @obj: opaque pointer to a cq, wq etc
155*4882a593Smuzhiyun *
156*4882a593Smuzhiyun * Return: rvt_mmap struct on success, ERR_PTR on failure
157*4882a593Smuzhiyun */
/**
 * rvt_create_mmap_info - allocate information for hfi1_mmap
 * @rdi: rvt dev struct
 * @size: size in bytes to map
 * @udata: user data (must be valid!)
 * @obj: opaque pointer to a cq, wq etc
 *
 * Reserves a unique, SHMLBA-aligned mmap offset for @obj and returns
 * an info structure describing it, ready to be queued for a later
 * mmap() call.
 *
 * Return: rvt_mmap struct on success, ERR_PTR on failure
 */
struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, u32 size,
					   struct ib_udata *udata, void *obj)
{
	struct rvt_mmap_info *ip;
	u32 aligned_size = PAGE_ALIGN(size);

	if (!udata)
		return ERR_PTR(-EINVAL);

	ip = kmalloc_node(sizeof(*ip), GFP_KERNEL, rdi->dparms.node);
	if (!ip)
		return ERR_PTR(-ENOMEM);

	/* Hand out offsets aligned to SHMLBA; offset 0 is reserved. */
	spin_lock_irq(&rdi->mmap_offset_lock);
	if (!rdi->mmap_offset)
		rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA);
	ip->offset = rdi->mmap_offset;
	rdi->mmap_offset += ALIGN(aligned_size, SHMLBA);
	spin_unlock_irq(&rdi->mmap_offset_lock);

	kref_init(&ip->ref);
	INIT_LIST_HEAD(&ip->pending_mmaps);
	ip->size = aligned_size;
	ip->obj = obj;
	ip->context = container_of(udata, struct uverbs_attr_bundle,
				   driver_udata)->context;

	return ip;
}
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun /**
191*4882a593Smuzhiyun * rvt_update_mmap_info - update a mem map
192*4882a593Smuzhiyun * @rdi: rvt dev struct
193*4882a593Smuzhiyun * @ip: mmap info pointer
194*4882a593Smuzhiyun * @size: size to grow by
195*4882a593Smuzhiyun * @obj: opaque pointer to cq, wq, etc.
196*4882a593Smuzhiyun */
/**
 * rvt_update_mmap_info - update a mem map
 * @rdi: rvt dev struct
 * @ip: mmap info pointer
 * @size: size to grow by
 * @obj: opaque pointer to cq, wq, etc.
 *
 * Assigns @ip a fresh mmap offset and records the new object pointer
 * and page-aligned size.
 */
void rvt_update_mmap_info(struct rvt_dev_info *rdi, struct rvt_mmap_info *ip,
			  u32 size, void *obj)
{
	u32 aligned_size = PAGE_ALIGN(size);

	/* Reserve a new offset; offset 0 is never handed out. */
	spin_lock_irq(&rdi->mmap_offset_lock);
	if (!rdi->mmap_offset)
		rdi->mmap_offset = PAGE_SIZE;
	ip->offset = rdi->mmap_offset;
	rdi->mmap_offset += aligned_size;
	spin_unlock_irq(&rdi->mmap_offset_lock);

	ip->obj = obj;
	ip->size = aligned_size;
}
212