// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corp. 2002, 2020
 */
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #define KMSG_COMPONENT "zfcp"
11*4882a593Smuzhiyun #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #include <linux/slab.h>
14*4882a593Smuzhiyun #include <linux/module.h>
15*4882a593Smuzhiyun #include "zfcp_ext.h"
16*4882a593Smuzhiyun #include "zfcp_qdio.h"
17*4882a593Smuzhiyun
/* Module parameter "datarouter": when true (default), request enabling of
 * the hardware data router (multibuffer SBALs) during zfcp_qdio_open().
 * Permissions 0400: readable by root, not changeable at runtime. */
static bool enable_multibuffer = true;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
21*4882a593Smuzhiyun
zfcp_qdio_handler_error(struct zfcp_qdio * qdio,char * dbftag,unsigned int qdio_err)22*4882a593Smuzhiyun static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *dbftag,
23*4882a593Smuzhiyun unsigned int qdio_err)
24*4882a593Smuzhiyun {
25*4882a593Smuzhiyun struct zfcp_adapter *adapter = qdio->adapter;
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun if (qdio_err & QDIO_ERROR_SLSB_STATE) {
30*4882a593Smuzhiyun zfcp_qdio_siosl(adapter);
31*4882a593Smuzhiyun zfcp_erp_adapter_shutdown(adapter, 0, dbftag);
32*4882a593Smuzhiyun return;
33*4882a593Smuzhiyun }
34*4882a593Smuzhiyun zfcp_erp_adapter_reopen(adapter,
35*4882a593Smuzhiyun ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
36*4882a593Smuzhiyun ZFCP_STATUS_COMMON_ERP_FAILED, dbftag);
37*4882a593Smuzhiyun }
38*4882a593Smuzhiyun
zfcp_qdio_zero_sbals(struct qdio_buffer * sbal[],int first,int cnt)39*4882a593Smuzhiyun static void zfcp_qdio_zero_sbals(struct qdio_buffer *sbal[], int first, int cnt)
40*4882a593Smuzhiyun {
41*4882a593Smuzhiyun int i, sbal_idx;
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun for (i = first; i < first + cnt; i++) {
44*4882a593Smuzhiyun sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q;
45*4882a593Smuzhiyun memset(sbal[sbal_idx], 0, sizeof(struct qdio_buffer));
46*4882a593Smuzhiyun }
47*4882a593Smuzhiyun }
48*4882a593Smuzhiyun
/* this needs to be called prior to updating the queue fill level */
static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
{
	unsigned long long now, span;
	int used;

	/* time elapsed since the previous sample, in TOD clock units
	 * scaled down by 2^12 */
	now = get_tod_clock_monotonic();
	span = (now - qdio->req_q_time) >> 12;
	/* request-queue SBALs that were in use over that whole interval;
	 * hence the caller must not have updated req_q_free yet */
	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
	qdio->req_q_util += used * span;
	qdio->req_q_time = now;
}
61*4882a593Smuzhiyun
/**
 * zfcp_qdio_int_req - QDIO handler for the outbound (request) queue
 * @cdev: ccw device the completion was reported for (unused here)
 * @qdio_err: error flags from the QDIO layer, 0 on success
 * @queue_no: completed queue number (unused; only one request queue)
 * @idx: ring index of the first completed SBAL
 * @count: number of completed SBALs starting at @idx
 * @parm: opaque int_parm carrying the struct zfcp_qdio pointer
 */
static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
			      int queue_no, int idx, int count,
			      unsigned long parm)
{
	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;

	if (unlikely(qdio_err)) {
		zfcp_qdio_handler_error(qdio, "qdireq1", qdio_err);
		return;
	}

	/* cleanup all SBALs being program-owned now */
	zfcp_qdio_zero_sbals(qdio->req_q, idx, count);

	/* sample utilization before raising the fill level (see
	 * zfcp_qdio_account's contract) */
	spin_lock_irq(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock_irq(&qdio->stat_lock);
	/* hand the SBALs back to the free pool and wake any waiter
	 * blocked in zfcp_qdio_sbal_get */
	atomic_add(count, &qdio->req_q_free);
	wake_up(&qdio->req_q_wq);
}
82*4882a593Smuzhiyun
zfcp_qdio_int_resp(struct ccw_device * cdev,unsigned int qdio_err,int queue_no,int idx,int count,unsigned long parm)83*4882a593Smuzhiyun static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
84*4882a593Smuzhiyun int queue_no, int idx, int count,
85*4882a593Smuzhiyun unsigned long parm)
86*4882a593Smuzhiyun {
87*4882a593Smuzhiyun struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
88*4882a593Smuzhiyun struct zfcp_adapter *adapter = qdio->adapter;
89*4882a593Smuzhiyun int sbal_no, sbal_idx;
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun if (unlikely(qdio_err)) {
92*4882a593Smuzhiyun if (zfcp_adapter_multi_buffer_active(adapter)) {
93*4882a593Smuzhiyun void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
94*4882a593Smuzhiyun struct qdio_buffer_element *sbale;
95*4882a593Smuzhiyun u64 req_id;
96*4882a593Smuzhiyun u8 scount;
97*4882a593Smuzhiyun
98*4882a593Smuzhiyun memset(pl, 0,
99*4882a593Smuzhiyun ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
100*4882a593Smuzhiyun sbale = qdio->res_q[idx]->element;
101*4882a593Smuzhiyun req_id = sbale->addr;
102*4882a593Smuzhiyun scount = min(sbale->scount + 1,
103*4882a593Smuzhiyun ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
104*4882a593Smuzhiyun /* incl. signaling SBAL */
105*4882a593Smuzhiyun
106*4882a593Smuzhiyun for (sbal_no = 0; sbal_no < scount; sbal_no++) {
107*4882a593Smuzhiyun sbal_idx = (idx + sbal_no) %
108*4882a593Smuzhiyun QDIO_MAX_BUFFERS_PER_Q;
109*4882a593Smuzhiyun pl[sbal_no] = qdio->res_q[sbal_idx];
110*4882a593Smuzhiyun }
111*4882a593Smuzhiyun zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
112*4882a593Smuzhiyun }
113*4882a593Smuzhiyun zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
114*4882a593Smuzhiyun return;
115*4882a593Smuzhiyun }
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun /*
118*4882a593Smuzhiyun * go through all SBALs from input queue currently
119*4882a593Smuzhiyun * returned by QDIO layer
120*4882a593Smuzhiyun */
121*4882a593Smuzhiyun for (sbal_no = 0; sbal_no < count; sbal_no++) {
122*4882a593Smuzhiyun sbal_idx = (idx + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
123*4882a593Smuzhiyun /* go through all SBALEs of SBAL */
124*4882a593Smuzhiyun zfcp_fsf_reqid_check(qdio, sbal_idx);
125*4882a593Smuzhiyun }
126*4882a593Smuzhiyun
127*4882a593Smuzhiyun /*
128*4882a593Smuzhiyun * put SBALs back to response queue
129*4882a593Smuzhiyun */
130*4882a593Smuzhiyun if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, idx, count))
131*4882a593Smuzhiyun zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdires2");
132*4882a593Smuzhiyun }
133*4882a593Smuzhiyun
/**
 * zfcp_qdio_sbal_chain - terminate the current SBAL and open the next one
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: the request whose SBAL chain is being extended
 *
 * Returns the first usable SBALE of the freshly opened SBAL, or NULL if
 * the request already occupies its last allowed SBAL (sbal_limit); in
 * that case the LAST_ENTRY flag has already been set on the current
 * SBALE and q_req is otherwise unchanged.
 */
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (q_req->sbal_last == q_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;

	/* calculate index of next SBAL (ring wrap-around) */
	q_req->sbal_last++;
	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this requests number of SBALs up-to-date */
	q_req->sbal_number++;
	BUG_ON(q_req->sbal_number > ZFCP_QDIO_MAX_SBALS_PER_REQ);

	/* start at first SBALE of new SBAL */
	q_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	return sbale;
}
168*4882a593Smuzhiyun
169*4882a593Smuzhiyun static struct qdio_buffer_element *
zfcp_qdio_sbale_next(struct zfcp_qdio * qdio,struct zfcp_qdio_req * q_req)170*4882a593Smuzhiyun zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
171*4882a593Smuzhiyun {
172*4882a593Smuzhiyun if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
173*4882a593Smuzhiyun return zfcp_qdio_sbal_chain(qdio, q_req);
174*4882a593Smuzhiyun q_req->sbale_curr++;
175*4882a593Smuzhiyun return zfcp_qdio_sbale_curr(qdio, q_req);
176*4882a593Smuzhiyun }
177*4882a593Smuzhiyun
178*4882a593Smuzhiyun /**
179*4882a593Smuzhiyun * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
180*4882a593Smuzhiyun * @qdio: pointer to struct zfcp_qdio
181*4882a593Smuzhiyun * @q_req: pointer to struct zfcp_qdio_req
182*4882a593Smuzhiyun * @sg: scatter-gather list
183*4882a593Smuzhiyun * Returns: zero or -EINVAL on error
184*4882a593Smuzhiyun */
zfcp_qdio_sbals_from_sg(struct zfcp_qdio * qdio,struct zfcp_qdio_req * q_req,struct scatterlist * sg)185*4882a593Smuzhiyun int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
186*4882a593Smuzhiyun struct scatterlist *sg)
187*4882a593Smuzhiyun {
188*4882a593Smuzhiyun struct qdio_buffer_element *sbale;
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun /* set storage-block type for this request */
191*4882a593Smuzhiyun sbale = zfcp_qdio_sbale_req(qdio, q_req);
192*4882a593Smuzhiyun sbale->sflags |= q_req->sbtype;
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun for (; sg; sg = sg_next(sg)) {
195*4882a593Smuzhiyun sbale = zfcp_qdio_sbale_next(qdio, q_req);
196*4882a593Smuzhiyun if (!sbale) {
197*4882a593Smuzhiyun atomic_inc(&qdio->req_q_full);
198*4882a593Smuzhiyun zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
199*4882a593Smuzhiyun q_req->sbal_number);
200*4882a593Smuzhiyun return -EINVAL;
201*4882a593Smuzhiyun }
202*4882a593Smuzhiyun sbale->addr = sg_phys(sg);
203*4882a593Smuzhiyun sbale->length = sg->length;
204*4882a593Smuzhiyun }
205*4882a593Smuzhiyun return 0;
206*4882a593Smuzhiyun }
207*4882a593Smuzhiyun
zfcp_qdio_sbal_check(struct zfcp_qdio * qdio)208*4882a593Smuzhiyun static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
209*4882a593Smuzhiyun {
210*4882a593Smuzhiyun if (atomic_read(&qdio->req_q_free) ||
211*4882a593Smuzhiyun !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
212*4882a593Smuzhiyun return 1;
213*4882a593Smuzhiyun return 0;
214*4882a593Smuzhiyun }
215*4882a593Smuzhiyun
/**
 * zfcp_qdio_sbal_get - get free sbal in request queue, wait if necessary
 * @qdio: pointer to struct zfcp_qdio
 *
 * The req_q_lock must be held by the caller of this function, and
 * this function may only be called from process context; it will
 * sleep when waiting for a free sbal.
 *
 * Returns: 0 on success, -EIO if there is no free sbal after waiting.
 */
int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
	long ret;

	/* drops and re-acquires req_q_lock around the sleep; waits at
	 * most 5 seconds for zfcp_qdio_sbal_check to become true */
	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

	/* even a successful wakeup is useless if QDIO went down meanwhile
	 * (the wait condition also fires on !QDIOUP) */
	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return -EIO;

	/* ret > 0: condition became true before the timeout */
	if (ret > 0)
		return 0;

	/* ret == 0: timed out without a free SBAL */
	if (!ret) {
		atomic_inc(&qdio->req_q_full);
		/* assume hanging outbound queue, try queue recovery */
		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
	}

	/* timed out or interrupted by a signal (ret < 0) */
	return -EIO;
}
247*4882a593Smuzhiyun
/**
 * zfcp_qdio_send - send req to QDIO
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * Returns: 0 on success, error otherwise
 */
int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
{
	int retval;
	u8 sbal_number = q_req->sbal_number;

	/* sample utilization before lowering the fill level (see
	 * zfcp_qdio_account's contract) */
	spin_lock(&qdio->stat_lock);
	zfcp_qdio_account(qdio);
	spin_unlock(&qdio->stat_lock);

	/* claim the request's SBALs from the free pool up front */
	atomic_sub(sbal_number, &qdio->req_q_free);

	retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
			 q_req->sbal_first, sbal_number);

	if (unlikely(retval)) {
		/* Failed to submit the IO, roll back our modifications. */
		atomic_add(sbal_number, &qdio->req_q_free);
		zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
				     sbal_number);
		return retval;
	}

	/* account for transferred buffers: advance the ring index */
	qdio->req_q_idx += sbal_number;
	qdio->req_q_idx %= QDIO_MAX_BUFFERS_PER_Q;

	return 0;
}
282*4882a593Smuzhiyun
283*4882a593Smuzhiyun /**
284*4882a593Smuzhiyun * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
285*4882a593Smuzhiyun * @qdio: pointer to struct zfcp_qdio
286*4882a593Smuzhiyun * Returns: -ENOMEM on memory allocation error or return value from
287*4882a593Smuzhiyun * qdio_allocate
288*4882a593Smuzhiyun */
zfcp_qdio_allocate(struct zfcp_qdio * qdio)289*4882a593Smuzhiyun static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
290*4882a593Smuzhiyun {
291*4882a593Smuzhiyun int ret;
292*4882a593Smuzhiyun
293*4882a593Smuzhiyun ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
294*4882a593Smuzhiyun if (ret)
295*4882a593Smuzhiyun return -ENOMEM;
296*4882a593Smuzhiyun
297*4882a593Smuzhiyun ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
298*4882a593Smuzhiyun if (ret)
299*4882a593Smuzhiyun goto free_req_q;
300*4882a593Smuzhiyun
301*4882a593Smuzhiyun init_waitqueue_head(&qdio->req_q_wq);
302*4882a593Smuzhiyun
303*4882a593Smuzhiyun ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);
304*4882a593Smuzhiyun if (ret)
305*4882a593Smuzhiyun goto free_res_q;
306*4882a593Smuzhiyun
307*4882a593Smuzhiyun return 0;
308*4882a593Smuzhiyun
309*4882a593Smuzhiyun free_res_q:
310*4882a593Smuzhiyun qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
311*4882a593Smuzhiyun free_req_q:
312*4882a593Smuzhiyun qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
313*4882a593Smuzhiyun return ret;
314*4882a593Smuzhiyun }
315*4882a593Smuzhiyun
/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @qdio: pointer to structure zfcp_qdio
 */
void zfcp_qdio_close(struct zfcp_qdio *qdio)
{
	struct zfcp_adapter *adapter = qdio->adapter;
	int idx, count;

	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	spin_lock_irq(&qdio->req_q_lock);
	atomic_andnot(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_irq(&qdio->req_q_lock);

	/* let any zfcp_qdio_sbal_get waiter observe the cleared flag */
	wake_up(&qdio->req_q_wq);

	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals: wipe every SBAL that is not in the
	 * free pool, starting right after the free region */
	count = atomic_read(&qdio->req_q_free);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		idx = (qdio->req_q_idx + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
	}
	/* reset request queue bookkeeping for the next zfcp_qdio_open */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, 0);
}
347*4882a593Smuzhiyun
zfcp_qdio_shost_update(struct zfcp_adapter * const adapter,const struct zfcp_qdio * const qdio)348*4882a593Smuzhiyun void zfcp_qdio_shost_update(struct zfcp_adapter *const adapter,
349*4882a593Smuzhiyun const struct zfcp_qdio *const qdio)
350*4882a593Smuzhiyun {
351*4882a593Smuzhiyun struct Scsi_Host *const shost = adapter->scsi_host;
352*4882a593Smuzhiyun
353*4882a593Smuzhiyun if (shost == NULL)
354*4882a593Smuzhiyun return;
355*4882a593Smuzhiyun
356*4882a593Smuzhiyun shost->sg_tablesize = qdio->max_sbale_per_req;
357*4882a593Smuzhiyun shost->max_sectors = qdio->max_sbale_per_req * 8;
358*4882a593Smuzhiyun }
359*4882a593Smuzhiyun
/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
	struct qdio_buffer **input_sbals[1] = {qdio->res_q};
	struct qdio_buffer **output_sbals[1] = {qdio->req_q};
	struct qdio_buffer_element *sbale;
	struct qdio_initialize init_data = {0};
	struct zfcp_adapter *adapter = qdio->adapter;
	struct ccw_device *cdev = adapter->ccw_device;
	struct qdio_ssqd_desc ssqd;
	int cc;

	/* refuse a second open while the queues are already up */
	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	/* allow zfcp_qdio_siosl to trigger hardware logging again */
	atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
		      &qdio->adapter->status);

	/* one input and one output queue, zfcp queue format; request data
	 * diversion, and the data router if the module parameter allows */
	init_data.q_format = QDIO_ZFCP_QFMT;
	init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
	if (enable_multibuffer)
		init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
	init_data.no_input_qs = 1;
	init_data.no_output_qs = 1;
	init_data.input_handler = zfcp_qdio_int_resp;
	init_data.output_handler = zfcp_qdio_int_req;
	init_data.int_parm = (unsigned long) qdio;
	init_data.input_sbal_addr_array = input_sbals;
	init_data.output_sbal_addr_array = output_sbals;
	/* threshold leaves room for two maximum-size requests below the
	 * full ring before QDIO rescans the output queue */
	init_data.scan_threshold =
		QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;

	if (qdio_establish(cdev, &init_data))
		goto failed_establish;

	if (qdio_get_ssqd_desc(cdev, &ssqd))
		goto failed_qdio;

	/* record which requested features the channel actually granted */
	if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
		atomic_or(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
			  &qdio->adapter->status);

	if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
		atomic_or(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
	} else {
		atomic_andnot(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
		/* without multibuffer, one SBALE per SBAL is reserved */
		qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
	}

	/* minus 2: reserved SBALEs per request — confirm which two (e.g.
	 * request header / signaling) against the FSF layer */
	qdio->max_sbale_per_req =
		ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
		- 2;
	if (qdio_activate(cdev))
		goto failed_qdio;

	/* present every response SBAL as a single empty entry */
	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(qdio->res_q[cc]->element[0]);
		sbale->length = 0;
		sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
		sbale->sflags = 0;
		sbale->addr = 0;
	}

	/* hand the whole response ring to the channel */
	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBALS / number of available SBALS */
	qdio->req_q_idx = 0;
	atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
	atomic_or(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);

	/* publish the (possibly changed) transfer limits to the SCSI host */
	zfcp_qdio_shost_update(adapter, qdio);

	return 0;

failed_qdio:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&cdev->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}
447*4882a593Smuzhiyun
zfcp_qdio_destroy(struct zfcp_qdio * qdio)448*4882a593Smuzhiyun void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
449*4882a593Smuzhiyun {
450*4882a593Smuzhiyun if (!qdio)
451*4882a593Smuzhiyun return;
452*4882a593Smuzhiyun
453*4882a593Smuzhiyun if (qdio->adapter->ccw_device)
454*4882a593Smuzhiyun qdio_free(qdio->adapter->ccw_device);
455*4882a593Smuzhiyun
456*4882a593Smuzhiyun qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
457*4882a593Smuzhiyun qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
458*4882a593Smuzhiyun kfree(qdio);
459*4882a593Smuzhiyun }
460*4882a593Smuzhiyun
zfcp_qdio_setup(struct zfcp_adapter * adapter)461*4882a593Smuzhiyun int zfcp_qdio_setup(struct zfcp_adapter *adapter)
462*4882a593Smuzhiyun {
463*4882a593Smuzhiyun struct zfcp_qdio *qdio;
464*4882a593Smuzhiyun
465*4882a593Smuzhiyun qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
466*4882a593Smuzhiyun if (!qdio)
467*4882a593Smuzhiyun return -ENOMEM;
468*4882a593Smuzhiyun
469*4882a593Smuzhiyun qdio->adapter = adapter;
470*4882a593Smuzhiyun
471*4882a593Smuzhiyun if (zfcp_qdio_allocate(qdio)) {
472*4882a593Smuzhiyun kfree(qdio);
473*4882a593Smuzhiyun return -ENOMEM;
474*4882a593Smuzhiyun }
475*4882a593Smuzhiyun
476*4882a593Smuzhiyun spin_lock_init(&qdio->req_q_lock);
477*4882a593Smuzhiyun spin_lock_init(&qdio->stat_lock);
478*4882a593Smuzhiyun
479*4882a593Smuzhiyun adapter->qdio = qdio;
480*4882a593Smuzhiyun return 0;
481*4882a593Smuzhiyun }
482*4882a593Smuzhiyun
483*4882a593Smuzhiyun /**
484*4882a593Smuzhiyun * zfcp_qdio_siosl - Trigger logging in FCP channel
485*4882a593Smuzhiyun * @adapter: The zfcp_adapter where to trigger logging
486*4882a593Smuzhiyun *
487*4882a593Smuzhiyun * Call the cio siosl function to trigger hardware logging. This
488*4882a593Smuzhiyun * wrapper function sets a flag to ensure hardware logging is only
489*4882a593Smuzhiyun * triggered once before going through qdio shutdown.
490*4882a593Smuzhiyun *
491*4882a593Smuzhiyun * The triggers are always run from qdio tasklet context, so no
492*4882a593Smuzhiyun * additional synchronization is necessary.
493*4882a593Smuzhiyun */
zfcp_qdio_siosl(struct zfcp_adapter * adapter)494*4882a593Smuzhiyun void zfcp_qdio_siosl(struct zfcp_adapter *adapter)
495*4882a593Smuzhiyun {
496*4882a593Smuzhiyun int rc;
497*4882a593Smuzhiyun
498*4882a593Smuzhiyun if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_SIOSL_ISSUED)
499*4882a593Smuzhiyun return;
500*4882a593Smuzhiyun
501*4882a593Smuzhiyun rc = ccw_device_siosl(adapter->ccw_device);
502*4882a593Smuzhiyun if (!rc)
503*4882a593Smuzhiyun atomic_or(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
504*4882a593Smuzhiyun &adapter->status);
505*4882a593Smuzhiyun }
506