/*
 * Driver for /dev/crypto device (aka CryptoDev)
 *
 * Copyright (c) 2009-2013 Nikos Mavrogiannopoulos <nmav@gnutls.org>
 * Copyright (c) 2010 Phil Sutter
 * Copyright (c) 2011, 2012 OpenSSL Software Foundation, Inc.
 *
 * This file is part of linux cryptodev.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/ioctl.h>
#include <linux/random.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include "cryptodev.h"
#include "zc.h"
#include "version.h"

/* Helper functions to assist zero copy.
 * This needs to be redesigned and moved out of the session. --nmav
 */

/* offset of buf in its first page */
#define PAGEOFFSET(buf) ((unsigned long)buf & ~PAGE_MASK)
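/* For example, with 4 KiB pages (PAGE_MASK == ~0xfffUL), a buffer at user
 * address 0x1003 has PAGEOFFSET() == 0x3, i.e. it starts 3 bytes into its
 * first page.
 */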

/* fetch the pages addr resides in into pg and initialise sg with them */
int __cryptodev_get_userbuf(uint8_t __user *addr, uint32_t len, int write,
                unsigned int pgcount, struct page **pg, struct scatterlist *sg,
                struct task_struct *task, struct mm_struct *mm)
{
        int ret, pglen, i = 0;
        struct scatterlist *sgp;

        if (unlikely(!pgcount || !len || !addr)) {
                sg_mark_end(sg);
                return 0;
        }

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
        down_read(&mm->mmap_sem);
#else
        mmap_read_lock(mm);
#endif
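        /* The prototype of get_user_pages()/get_user_pages_remote() changed
         * several times, as the version checks below reflect: the explicit
         * task/mm arguments were dropped, the write/force ints were folded
         * into a FOLL_* gup_flags argument, and a "locked" argument was
         * added.  Hence the ladder of KERNEL_VERSION checks.
         */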
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 168))
        ret = get_user_pages(task, mm,
                        (unsigned long)addr, pgcount, write, 0, pg, NULL);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
        ret = get_user_pages(task, mm,
                        (unsigned long)addr, pgcount, write, pg, NULL);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0))
        ret = get_user_pages_remote(task, mm,
                        (unsigned long)addr, pgcount, write, 0, pg, NULL);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
        ret = get_user_pages_remote(task, mm,
                        (unsigned long)addr, pgcount, write ? FOLL_WRITE : 0,
                        pg, NULL);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0))
        ret = get_user_pages_remote(task, mm,
                        (unsigned long)addr, pgcount, write ? FOLL_WRITE : 0,
                        pg, NULL, NULL);
#else
        ret = get_user_pages_remote(mm,
                        (unsigned long)addr, pgcount, write ? FOLL_WRITE : 0,
                        pg, NULL, NULL);
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0))
        up_read(&mm->mmap_sem);
#else
        mmap_read_unlock(mm);
#endif
        if (ret != pgcount)
                return -EINVAL;
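
        /* Build the scatterlist over the pinned pages: the first entry starts
         * at the buffer's offset inside its first page and runs to the end of
         * that page (or of the buffer), every following entry covers a whole
         * page until len is exhausted, and the last used entry is terminated
         * with sg_mark_end().
         */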
        sg_init_table(sg, pgcount);

        pglen = min((ptrdiff_t)(PAGE_SIZE - PAGEOFFSET(addr)), (ptrdiff_t)len);
        sg_set_page(sg, pg[i++], pglen, PAGEOFFSET(addr));

        len -= pglen;
        for (sgp = sg_next(sg); len; sgp = sg_next(sgp)) {
                pglen = min((uint32_t)PAGE_SIZE, len);
                sg_set_page(sgp, pg[i++], pglen, 0);
                len -= pglen;
        }
        sg_mark_end(sg_last(sg, pgcount));
        return 0;
}
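
/* Grow the session's page and scatterlist arrays so they can hold at least
 * pagecount entries.  The size is doubled until it fits, so a series of
 * slightly larger requests does not force a reallocation every time.
 */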
int cryptodev_adjust_sg_array(struct csession *ses, int pagecount)
{
        struct scatterlist *sg;
        struct page **pages;
        int array_size;

        for (array_size = ses->array_size; array_size < pagecount;
             array_size *= 2)
                ;
        ddebug(1, "reallocating from %d to %d pages",
                        ses->array_size, array_size);
        pages = krealloc(ses->pages, array_size * sizeof(struct page *),
                        GFP_KERNEL);
        if (unlikely(!pages))
                return -ENOMEM;
        ses->pages = pages;
        sg = krealloc(ses->sg, array_size * sizeof(struct scatterlist),
                        GFP_KERNEL);
        if (unlikely(!sg))
                return -ENOMEM;
        ses->sg = sg;
        ses->array_size = array_size;

        return 0;
}
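
/* Drop the references taken by get_user_pages().  Every non-reserved page is
 * marked dirty; the writable (destination) pages, which come after the
 * read-only source pages in ses->pages, additionally get their data cache
 * flushed before put_page() releases them.
 */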
void cryptodev_release_user_pages(struct csession *ses)
{
        unsigned int i;

        for (i = 0; i < ses->used_pages; i++) {
                if (!PageReserved(ses->pages[i]))
                        SetPageDirty(ses->pages[i]);

                if (ses->readonly_pages == 0)
                        flush_dcache_page(ses->pages[i]);
                else
                        ses->readonly_pages--;

                put_page(ses->pages[i]);
        }
        ses->used_pages = 0;
}

/* make src and dst available in scatterlists.
 * dst might be the same as src.
 */
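/* The pinned pages live in ses->pages and are mapped through ses->sg: when
 * src and dst are distinct, the read-only source pages come first and the
 * writable destination pages follow; for in-place operation (src == dst) one
 * writable mapping is shared by *src_sg and *dst_sg.  The pages stay pinned
 * until cryptodev_release_user_pages() is called.
 */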
int cryptodev_get_userbuf(struct csession *ses,
                void *__user src, unsigned int src_len,
                void *__user dst, unsigned int dst_len,
                struct task_struct *task, struct mm_struct *mm,
                struct scatterlist **src_sg,
                struct scatterlist **dst_sg)
{
        int src_pagecount, dst_pagecount;
        int rc;

        /* Empty input is a valid option to many algorithms & is tested by NIST/FIPS */
        /* Make sure NULL input has 0 length */
        if (!src && src_len)
                src_len = 0;

        /* I don't know that null output is ever useful, but we can handle it gracefully */
        /* Make sure NULL output has 0 length */
        if (!dst && dst_len)
                dst_len = 0;
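
        /* Page accounting: PAGECOUNT() (defined in the driver's headers) gives
         * the number of pages each buffer spans; e.g. with 4 KiB pages a
         * 5000-byte buffer starting 100 bytes into a page spans two pages.
         * In-place operations share one set of pages; otherwise the source and
         * destination counts add up and the source pages are read-only.
         */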
        src_pagecount = PAGECOUNT(src, src_len);
        dst_pagecount = PAGECOUNT(dst, dst_len);

        ses->used_pages = (src == dst) ? max(src_pagecount, dst_pagecount)
                                       : src_pagecount + dst_pagecount;

        ses->readonly_pages = (src == dst) ? 0 : src_pagecount;

        if (ses->used_pages > ses->array_size) {
                rc = cryptodev_adjust_sg_array(ses, ses->used_pages);
                if (rc)
                        return rc;
        }

        if (src == dst) {       /* in-place operation */
                /* When we encrypt for authenc modes we need to write
                 * more data than we read. */
                if (src_len < dst_len)
                        src_len = dst_len;
                rc = __cryptodev_get_userbuf(src, src_len, 1, ses->used_pages,
                                ses->pages, ses->sg, task, mm);
                if (unlikely(rc)) {
                        derr(1, "failed to get user pages for data IO");
                        return rc;
                }
                (*src_sg) = (*dst_sg) = ses->sg;
                return 0;
        }

        *src_sg = NULL; /* default to no input */
        *dst_sg = NULL; /* default to ignore output */

        if (likely(src)) {
                rc = __cryptodev_get_userbuf(src, src_len, 0, ses->readonly_pages,
                                ses->pages, ses->sg, task, mm);
                if (unlikely(rc)) {
                        derr(1, "failed to get user pages for data input");
                        return rc;
                }
                *src_sg = ses->sg;
        }

        if (likely(dst)) {
                const unsigned int writable_pages =
                        ses->used_pages - ses->readonly_pages;
                struct page **dst_pages = ses->pages + ses->readonly_pages;
                *dst_sg = ses->sg + ses->readonly_pages;

                rc = __cryptodev_get_userbuf(dst, dst_len, 1, writable_pages,
                                dst_pages, *dst_sg, task, mm);
                if (unlikely(rc)) {
                        derr(1, "failed to get user pages for data output");
                        cryptodev_release_user_pages(ses); /* FIXME: use __release_userbuf(src, ...) */
                        return rc;
                }
        }
        return 0;
}
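
/* Illustrative call sequence (a sketch only; the surrounding caller and
 * variable names are hypothetical, not code from this driver):
 *
 *	struct scatterlist *src_sg, *dst_sg;
 *	int rc;
 *
 *	rc = cryptodev_get_userbuf(ses, src, src_len, dst, dst_len,
 *	                           task, mm, &src_sg, &dst_sg);
 *	if (rc)
 *		return rc;
 *	... run the crypto operation over src_sg / dst_sg ...
 *	cryptodev_release_user_pages(ses);
 */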