1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun #include <linux/kernel.h>
3*4882a593Smuzhiyun #include <linux/fs.h>
4*4882a593Smuzhiyun #include <linux/semaphore.h>
5*4882a593Smuzhiyun #include <linux/slab.h>
6*4882a593Smuzhiyun #include <linux/uaccess.h>
7*4882a593Smuzhiyun #include <asm/rtas.h>
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include "cxl.h"
10*4882a593Smuzhiyun #include "hcalls.h"
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun #define DOWNLOAD_IMAGE 1
13*4882a593Smuzhiyun #define VALIDATE_IMAGE 2
14*4882a593Smuzhiyun
/*
 * Header optionally prepended to the adapter image (when userspace sets
 * CXL_AI_NEED_HEADER).  Must be exactly CXL_AI_HEADER_SIZE bytes — this
 * is asserted with BUG_ON() in device_open().  All multi-byte fields are
 * stored big-endian via cpu_to_be*() when the header is built in
 * handle_image().
 */
struct ai_header {
	u16 version;
	u8 reserved0[6];
	u16 vendor;		/* taken from adapter->guest->vendor */
	u16 device;		/* taken from adapter->guest->device */
	u16 subsystem_vendor;
	u16 subsystem;
	u64 image_offset;	/* offset of image data in chunk 0 (= header size) */
	u64 image_length;	/* length in bytes of the raw image */
	u8 reserved1[96];
};

/* Serialises opens: only one process may use the flash chardev at a time */
static struct semaphore sem;
/* One zeroed page per scatter/gather entry, holding a chunk of the image */
static unsigned long *buffer[CXL_AI_MAX_ENTRIES];
/* Page holding the scatter/gather list handed to the download hcalls */
static struct sg_list *le;
/* Continuation token threaded through successive download/validate hcalls */
static u64 continue_token;
/* Non-zero once an image was successfully validated (triggers module reload) */
static unsigned int transfer;
32*4882a593Smuzhiyun
/* Header of the "ibm,update-properties" RTAS work area */
struct update_props_workarea {
	__be32 phandle;
	__be32 state;
	__be64 reserved;
	__be32 nprops;		/* number of property records that follow */
} __packed;

/* Header of the "ibm,update-nodes" RTAS work area (16 bytes) */
struct update_nodes_workarea {
	__be32 state;
	__be64 unit_address;
	__be32 reserved;
} __packed;

/* RTAS call scope argument — presumably "single device" per PAPR; confirm */
#define DEVICE_SCOPE 3
/* Each node record word: high byte = action opcode, low 24 bits = count */
#define NODE_ACTION_MASK 0xff000000
#define NODE_COUNT_MASK 0x00ffffff
#define OPCODE_DELETE 0x01000000
#define OPCODE_UPDATE 0x02000000
#define OPCODE_ADD 0x03000000
52*4882a593Smuzhiyun
/*
 * Invoke the RTAS call identified by @token with @scope, using @buf as
 * the work area.  @buf is copied into the architected rtas_data_buf
 * before the call and the (possibly updated) contents are copied back
 * afterwards.  rtas_data_buf is globally shared, so the whole exchange
 * is serialised with rtas_data_buf_lock.
 *
 * Returns the RTAS call status (negative on error, 1 when more data is
 * pending, 0 on completion).
 */
static int rcall(int token, char *buf, s32 scope)
{
	int status;

	spin_lock(&rtas_data_buf_lock);

	memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
	status = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
	memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

	spin_unlock(&rtas_data_buf_lock);
	return status;
}
66*4882a593Smuzhiyun
/*
 * Build a new struct property named @name with @vd bytes copied from
 * @value and hand it to cxl_update_properties() for device node @dn.
 *
 * On success ownership of the property (and its name/value buffers)
 * passes to the device tree; on failure everything allocated here is
 * freed.  Returns 0 or a negative errno.
 */
static int update_property(struct device_node *dn, const char *name,
			   u32 vd, char *value)
{
	struct property *prop;
	u32 *val;
	int rc = -ENOMEM;

	prop = kzalloc(sizeof(*prop), GFP_KERNEL);
	if (!prop)
		return -ENOMEM;

	prop->name = kstrdup(name, GFP_KERNEL);
	if (!prop->name)
		goto out_prop;

	prop->length = vd;
	prop->value = kzalloc(vd, GFP_KERNEL);
	if (!prop->value)
		goto out_name;
	memcpy(prop->value, value, vd);

	val = (u32 *)prop->value;
	rc = cxl_update_properties(dn, prop);
	pr_devel("%pOFn: update property (%s, length: %i, value: %#x)\n",
		 dn, name, vd, be32_to_cpu(*val));
	if (!rc)
		return 0;

	/* cxl_update_properties() rejected the property — undo allocations */
	kfree(prop->value);
out_name:
	kfree(prop->name);
out_prop:
	kfree(prop);
	return rc;
}
105*4882a593Smuzhiyun
/*
 * Pull updated properties for the device-tree node identified by
 * @phandle via the "ibm,update-properties" RTAS call and apply each one
 * with update_property().  rcall() returning 1 means more property data
 * is pending and the call must be repeated with the same work area.
 *
 * Work-area layout after the struct update_props_workarea header: a
 * stream of [NUL-terminated name][__be32 value descriptor vd][vd bytes
 * of value data] records.
 */
static int update_node(__be32 phandle, s32 scope)
{
	struct update_props_workarea *upwa;
	struct device_node *dn;
	int i, rc, ret;
	char *prop_data;
	char *buf;
	int token;
	u32 nprops;
	u32 vd;

	token = rtas_token("ibm,update-properties");
	if (token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	dn = of_find_node_by_phandle(be32_to_cpu(phandle));
	if (!dn) {
		kfree(buf);
		return -ENOENT;
	}

	upwa = (struct update_props_workarea *)&buf[0];
	upwa->phandle = phandle;
	do {
		rc = rcall(token, buf, scope);
		if (rc < 0)
			break;

		prop_data = buf + sizeof(*upwa);
		nprops = be32_to_cpu(upwa->nprops);

		/* The first record may have an empty name (a lone NUL):
		 * skip over its descriptor and value.  Assumes the PAPR
		 * ibm,update-properties layout — TODO confirm. */
		if (*prop_data == 0) {
			prop_data++;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += vd + sizeof(vd);
			nprops--;
		}

		for (i = 0; i < nprops; i++) {
			char *prop_name;

			prop_name = prop_data;
			prop_data += strlen(prop_name) + 1;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += sizeof(vd);

			/* 0x00000000 / 0x80000000 appear to be sentinel
			 * descriptors with no value data following —
			 * presumably per PAPR; confirm before changing. */
			if ((vd != 0x00000000) && (vd != 0x80000000)) {
				ret = update_property(dn, prop_name, vd,
						prop_data);
				if (ret)
					pr_err("cxl: Could not update property %s - %i\n",
					       prop_name, ret);

				prop_data += vd;
			}
		}
	} while (rc == 1);	/* rc == 1: more property data pending */

	of_node_put(dn);
	kfree(buf);
	return rc;
}
172*4882a593Smuzhiyun
/*
 * Walk the "ibm,update-nodes" RTAS results for this adapter and apply
 * the reported reconfiguration actions, calling update_node() for every
 * node flagged OPCODE_UPDATE.  rcall() returning 1 means more data is
 * pending and the call is repeated.
 *
 * NOTE(review): errors from rcall()/update_node() stop the loop but are
 * not propagated — the function always returns 0.  Presumably this is a
 * deliberate best-effort refresh; confirm before relying on the return
 * value (transfer_image() currently returns it to userspace).
 */
static int update_devicetree(struct cxl *adapter, s32 scope)
{
	struct update_nodes_workarea *unwa;
	u32 action, node_count;
	int token, rc, i;
	__be32 *data, phandle;
	char *buf;

	token = rtas_token("ibm,update-nodes");
	if (token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	unwa = (struct update_nodes_workarea *)&buf[0];
	unwa->unit_address = cpu_to_be64(adapter->guest->handle);
	do {
		rc = rcall(token, buf, scope);
		if (rc && rc != 1)
			break;

		/* skip the 16-byte work-area header (4 __be32 words) */
		data = (__be32 *)buf + 4;
		/* each record: action/count word followed by <count> phandles
		 * (plus one extra word per node for OPCODE_ADD) */
		while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
			action = be32_to_cpu(*data) & NODE_ACTION_MASK;
			node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
			pr_devel("device reconfiguration - action: %#x, nodes: %#x\n",
				action, node_count);
			data++;

			for (i = 0; i < node_count; i++) {
				phandle = *data++;

				switch (action) {
				case OPCODE_DELETE:
					/* nothing to do */
					break;
				case OPCODE_UPDATE:
					update_node(phandle, scope);
					break;
				case OPCODE_ADD:
					/* nothing to do, just move pointer */
					data++;
					break;
				}
			}
		}
	} while (rc == 1);	/* rc == 1: more work-area data pending */

	kfree(buf);
	return 0;
}
226*4882a593Smuzhiyun
/*
 * Chop the user-supplied adapter image into CXL_AI_BUFFER_SIZE chunks,
 * optionally prepending a generated struct ai_header to chunk 0, build
 * the scatter/gather list 'le' describing the chunks, and hand the list
 * to the download/validate hcall @fct.
 *
 * @adapter:   target adapter (provides the guest handle for the hcall)
 * @operation: DOWNLOAD_IMAGE or VALIDATE_IMAGE (used for logging only)
 * @fct:       cxl_h_download_adapter_image or cxl_h_validate_adapter_image
 * @ai:        kernel copy of the userspace request; ai->data is a
 *             __user pointer to the image bytes
 *
 * Returns 0 on success, a negative errno on failure, or the status
 * returned by the hcall.  On hcall success the global continue_token is
 * reset for the next transfer.
 */
static int handle_image(struct cxl *adapter, int operation,
			long (*fct)(u64, u64, u64, u64 *),
			struct cxl_adapter_image *ai)
{
	size_t mod, s_copy, len_chunk = 0;
	struct ai_header *header = NULL;
	unsigned int entries = 0, i;
	void *dest, *from;
	int rc = 0, need_header;

	/* base adapter image header */
	need_header = (ai->flags & CXL_AI_NEED_HEADER);
	if (need_header) {
		header = kzalloc(sizeof(struct ai_header), GFP_KERNEL);
		if (!header)
			return -ENOMEM;
		header->version = cpu_to_be16(1);
		header->vendor = cpu_to_be16(adapter->guest->vendor);
		header->device = cpu_to_be16(adapter->guest->device);
		header->subsystem_vendor = cpu_to_be16(adapter->guest->subsystem_vendor);
		header->subsystem = cpu_to_be16(adapter->guest->subsystem);
		header->image_offset = cpu_to_be64(CXL_AI_HEADER_SIZE);
		header->image_length = cpu_to_be64(ai->len_image);
	}

	/* number of entries in the list */
	len_chunk = ai->len_data;
	if (need_header)
		len_chunk += CXL_AI_HEADER_SIZE;

	entries = len_chunk / CXL_AI_BUFFER_SIZE;
	mod = len_chunk % CXL_AI_BUFFER_SIZE;
	if (mod)
		entries++;

	if (entries > CXL_AI_MAX_ENTRIES) {
		rc = -EINVAL;
		goto err;
	}

	/* < -- MAX_CHUNK_SIZE = 4096 * 256 = 1048576 bytes -->
	 * chunk 0 ----------------------------------------------------
	 *         |       header   |  data                           |
	 *         ----------------------------------------------------
	 * chunk 1 ----------------------------------------------------
	 *         | data                                             |
	 *         ----------------------------------------------------
	 * ....
	 * chunk n ----------------------------------------------------
	 *         | data                                             |
	 *         ----------------------------------------------------
	 */
	from = (void *) ai->data;
	for (i = 0; i < entries; i++) {
		dest = buffer[i];
		s_copy = CXL_AI_BUFFER_SIZE;

		if ((need_header) && (i == 0)) {
			/* add adapter image header */
			memcpy(buffer[i], header, sizeof(struct ai_header));
			s_copy = CXL_AI_BUFFER_SIZE - CXL_AI_HEADER_SIZE;
			dest += CXL_AI_HEADER_SIZE; /* image offset */
		}
		if ((i == (entries - 1)) && mod)
			s_copy = mod;

		/* copy data */
		if (copy_from_user(dest, from, s_copy)) {
			/*
			 * Fix: propagate the fault.  Previously this path
			 * jumped to err with rc still 0, so a partially
			 * copied image was reported as success to the
			 * caller.
			 */
			rc = -EFAULT;
			goto err;
		}

		/* fill in the list */
		le[i].phys_addr = cpu_to_be64(virt_to_phys(buffer[i]));
		le[i].len = cpu_to_be64(CXL_AI_BUFFER_SIZE);
		if ((i == (entries - 1)) && mod)
			le[i].len = cpu_to_be64(mod);
		from += s_copy;
	}
	pr_devel("%s (op: %i, need header: %i, entries: %i, token: %#llx)\n",
		__func__, operation, need_header, entries, continue_token);

	/*
	 * download/validate the adapter image to the coherent
	 * platform facility
	 */
	rc = fct(adapter->guest->handle, virt_to_phys(le), entries,
		&continue_token);
	if (rc == 0) /* success of download/validation operation */
		continue_token = 0;

err:
	kfree(header);

	return rc;
}
321*4882a593Smuzhiyun
transfer_image(struct cxl * adapter,int operation,struct cxl_adapter_image * ai)322*4882a593Smuzhiyun static int transfer_image(struct cxl *adapter, int operation,
323*4882a593Smuzhiyun struct cxl_adapter_image *ai)
324*4882a593Smuzhiyun {
325*4882a593Smuzhiyun int rc = 0;
326*4882a593Smuzhiyun int afu;
327*4882a593Smuzhiyun
328*4882a593Smuzhiyun switch (operation) {
329*4882a593Smuzhiyun case DOWNLOAD_IMAGE:
330*4882a593Smuzhiyun rc = handle_image(adapter, operation,
331*4882a593Smuzhiyun &cxl_h_download_adapter_image, ai);
332*4882a593Smuzhiyun if (rc < 0) {
333*4882a593Smuzhiyun pr_devel("resetting adapter\n");
334*4882a593Smuzhiyun cxl_h_reset_adapter(adapter->guest->handle);
335*4882a593Smuzhiyun }
336*4882a593Smuzhiyun return rc;
337*4882a593Smuzhiyun
338*4882a593Smuzhiyun case VALIDATE_IMAGE:
339*4882a593Smuzhiyun rc = handle_image(adapter, operation,
340*4882a593Smuzhiyun &cxl_h_validate_adapter_image, ai);
341*4882a593Smuzhiyun if (rc < 0) {
342*4882a593Smuzhiyun pr_devel("resetting adapter\n");
343*4882a593Smuzhiyun cxl_h_reset_adapter(adapter->guest->handle);
344*4882a593Smuzhiyun return rc;
345*4882a593Smuzhiyun }
346*4882a593Smuzhiyun if (rc == 0) {
347*4882a593Smuzhiyun pr_devel("remove current afu\n");
348*4882a593Smuzhiyun for (afu = 0; afu < adapter->slices; afu++)
349*4882a593Smuzhiyun cxl_guest_remove_afu(adapter->afu[afu]);
350*4882a593Smuzhiyun
351*4882a593Smuzhiyun pr_devel("resetting adapter\n");
352*4882a593Smuzhiyun cxl_h_reset_adapter(adapter->guest->handle);
353*4882a593Smuzhiyun
354*4882a593Smuzhiyun /* The entire image has now been
355*4882a593Smuzhiyun * downloaded and the validation has
356*4882a593Smuzhiyun * been successfully performed.
357*4882a593Smuzhiyun * After that, the partition should call
358*4882a593Smuzhiyun * ibm,update-nodes and
359*4882a593Smuzhiyun * ibm,update-properties to receive the
360*4882a593Smuzhiyun * current configuration
361*4882a593Smuzhiyun */
362*4882a593Smuzhiyun rc = update_devicetree(adapter, DEVICE_SCOPE);
363*4882a593Smuzhiyun transfer = 1;
364*4882a593Smuzhiyun }
365*4882a593Smuzhiyun return rc;
366*4882a593Smuzhiyun }
367*4882a593Smuzhiyun
368*4882a593Smuzhiyun return -EINVAL;
369*4882a593Smuzhiyun }
370*4882a593Smuzhiyun
/*
 * Copy the userspace request into the kernel, validate that all
 * reserved fields and unknown flag bits are zero, then run the actual
 * transfer.  Returns -EFAULT on a bad user pointer, -EINVAL on a
 * malformed request, otherwise the transfer_image() status.
 */
static long ioctl_transfer_image(struct cxl *adapter, int operation,
				 struct cxl_adapter_image __user *uai)
{
	struct cxl_adapter_image args;

	pr_devel("%s\n", __func__);

	if (copy_from_user(&args, uai, sizeof(args)))
		return -EFAULT;

	/*
	 * Make sure reserved fields and bits are set to 0
	 */
	if (args.reserved1 || args.reserved2 || args.reserved3 ||
	    args.reserved4 || (args.flags & ~CXL_AI_ALL))
		return -EINVAL;

	return transfer_image(adapter, operation, &args);
}
390*4882a593Smuzhiyun
/*
 * Open entry point for the adapter flash chardev.
 *
 * Only one process may drive a firmware download at a time, so the open
 * holds the module-wide semaphore for the lifetime of the file (it is
 * released in device_close()).  Pre-allocates the scatter/gather list
 * page and the CXL_AI_MAX_ENTRIES data pages used by handle_image().
 */
static int device_open(struct inode *inode, struct file *file)
{
	int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
	struct cxl *adapter;
	int rc = 0, i;

	pr_devel("in %s\n", __func__);

	/* struct ai_header must exactly fill the architected header size */
	BUG_ON(sizeof(struct ai_header) != CXL_AI_HEADER_SIZE);

	/* Allows one process to open the device by using a semaphore */
	if (down_interruptible(&sem) != 0)
		/* NOTE(review): -ERESTARTSYS is the conventional errno for
		 * an interrupted down — confirm userspace does not depend
		 * on -EPERM before changing. */
		return -EPERM;

	if (!(adapter = get_cxl_adapter(adapter_num))) {
		rc = -ENODEV;
		goto err_unlock;
	}

	file->private_data = adapter;
	/* reset per-transfer state left over from any previous open */
	continue_token = 0;
	transfer = 0;

	for (i = 0; i < CXL_AI_MAX_ENTRIES; i++)
		buffer[i] = NULL;

	/* aligned buffer containing list entries which describes up to
	 * 1 megabyte of data (256 entries of 4096 bytes each)
	 * Logical real address of buffer 0 - Buffer 0 length in bytes
	 * Logical real address of buffer 1 - Buffer 1 length in bytes
	 * Logical real address of buffer 2 - Buffer 2 length in bytes
	 * ....
	 * ....
	 * Logical real address of buffer N - Buffer N length in bytes
	 */
	le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
	if (!le) {
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
		buffer[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
		if (!buffer[i]) {
			rc = -ENOMEM;
			goto err1;
		}
	}

	return 0;

err1:
	/* free whichever data pages were allocated before the failure */
	for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
		if (buffer[i])
			free_page((unsigned long) buffer[i]);
	}

	if (le)
		free_page((unsigned long) le);
err:
	/* drop the adapter reference taken by get_cxl_adapter() */
	put_device(&adapter->dev);
err_unlock:
	up(&sem);

	return rc;
}
457*4882a593Smuzhiyun
device_ioctl(struct file * file,unsigned int cmd,unsigned long arg)458*4882a593Smuzhiyun static long device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
459*4882a593Smuzhiyun {
460*4882a593Smuzhiyun struct cxl *adapter = file->private_data;
461*4882a593Smuzhiyun
462*4882a593Smuzhiyun pr_devel("in %s\n", __func__);
463*4882a593Smuzhiyun
464*4882a593Smuzhiyun if (cmd == CXL_IOCTL_DOWNLOAD_IMAGE)
465*4882a593Smuzhiyun return ioctl_transfer_image(adapter,
466*4882a593Smuzhiyun DOWNLOAD_IMAGE,
467*4882a593Smuzhiyun (struct cxl_adapter_image __user *)arg);
468*4882a593Smuzhiyun else if (cmd == CXL_IOCTL_VALIDATE_IMAGE)
469*4882a593Smuzhiyun return ioctl_transfer_image(adapter,
470*4882a593Smuzhiyun VALIDATE_IMAGE,
471*4882a593Smuzhiyun (struct cxl_adapter_image __user *)arg);
472*4882a593Smuzhiyun else
473*4882a593Smuzhiyun return -EINVAL;
474*4882a593Smuzhiyun }
475*4882a593Smuzhiyun
/*
 * Release entry point: frees the transfer pages, releases the
 * single-open semaphore and the adapter reference, then either reloads
 * the cxl guest state (after a successful validate, transfer != 0) or
 * resets the adapter to abandon a partial download.
 */
static int device_close(struct inode *inode, struct file *file)
{
	struct cxl *adapter = file->private_data;
	int i;

	pr_devel("in %s\n", __func__);

	for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
		if (buffer[i])
			free_page((unsigned long) buffer[i]);
	}

	if (le)
		free_page((unsigned long) le);

	up(&sem);
	put_device(&adapter->dev);
	continue_token = 0;

	/* reload the module */
	if (transfer)
		cxl_guest_reload_module(adapter);
	else {
		pr_devel("resetting adapter\n");
		cxl_h_reset_adapter(adapter->guest->handle);
	}

	transfer = 0;
	return 0;
}
506*4882a593Smuzhiyun
/* File operations for the per-adapter firmware download chardev */
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = device_open,
	.unlocked_ioctl = device_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.release = device_close,
};
514*4882a593Smuzhiyun
/* Tear down the chardev registered by cxl_guest_add_chardev() */
void cxl_guest_remove_chardev(struct cxl *adapter)
{
	cdev_del(&adapter->guest->cdev);
}
519*4882a593Smuzhiyun
cxl_guest_add_chardev(struct cxl * adapter)520*4882a593Smuzhiyun int cxl_guest_add_chardev(struct cxl *adapter)
521*4882a593Smuzhiyun {
522*4882a593Smuzhiyun dev_t devt;
523*4882a593Smuzhiyun int rc;
524*4882a593Smuzhiyun
525*4882a593Smuzhiyun devt = MKDEV(MAJOR(cxl_get_dev()), CXL_CARD_MINOR(adapter));
526*4882a593Smuzhiyun cdev_init(&adapter->guest->cdev, &fops);
527*4882a593Smuzhiyun if ((rc = cdev_add(&adapter->guest->cdev, devt, 1))) {
528*4882a593Smuzhiyun dev_err(&adapter->dev,
529*4882a593Smuzhiyun "Unable to add chardev on adapter (card%i): %i\n",
530*4882a593Smuzhiyun adapter->adapter_num, rc);
531*4882a593Smuzhiyun goto err;
532*4882a593Smuzhiyun }
533*4882a593Smuzhiyun adapter->dev.devt = devt;
534*4882a593Smuzhiyun sema_init(&sem, 1);
535*4882a593Smuzhiyun err:
536*4882a593Smuzhiyun return rc;
537*4882a593Smuzhiyun }
538