// SPDX-License-Identifier: GPL-2.0
/*
 * zfcp device driver
 *
 * Debug traces for zfcp.
 *
 * Copyright IBM Corp. 2002, 2020
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include "zfcp_dbf.h"
#include "zfcp_ext.h"
#include "zfcp_fc.h"

static u32 dbfsize = 4;

module_param(dbfsize, uint, 0400);
MODULE_PARM_DESC(dbfsize,
		 "number of pages for each debug feature area (default 4)");

static u32 dbflevel = 3;

module_param(dbflevel, uint, 0400);
MODULE_PARM_DESC(dbflevel,
		 "log level for each debug feature area "
		 "(default 3, range 0..6)");

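/* length of a payload trace record whose data area holds "offset" bytes */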
static inline unsigned int zfcp_dbf_plen(unsigned int offset)
{
	return sizeof(struct zfcp_dbf_pay) + offset - ZFCP_DBF_PAY_MAX_REC;
}

static inline
void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
		       u64 req_id)
{
	struct zfcp_dbf_pay *pl = &dbf->pay_buf;
	u16 offset = 0, rec_length;

	spin_lock(&dbf->pay_lock);
	memset(pl, 0, sizeof(*pl));
	pl->fsf_req_id = req_id;
	memcpy(pl->area, area, ZFCP_DBF_TAG_LEN);

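	/* split the payload into chunks of at most ZFCP_DBF_PAY_MAX_REC
	 * bytes; each chunk becomes its own pay record with an increasing
	 * counter
	 */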
	while (offset < length) {
		rec_length = min((u16) ZFCP_DBF_PAY_MAX_REC,
				 (u16) (length - offset));
		memcpy(pl->data, data + offset, rec_length);
		debug_event(dbf->pay, 1, pl, zfcp_dbf_plen(rec_length));

		offset += rec_length;
		pl->counter++;
	}

	spin_unlock(&dbf->pay_lock);
}

/**
 * zfcp_dbf_hba_fsf_res - trace event for fsf responses
 * @tag: tag indicating which kind of FSF response has been received
 * @level: trace level to be used for event
 * @req: request for which a response was received
 */
void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
	struct fsf_qtcb_header *q_head = &req->qtcb->header;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	unsigned long flags;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_RES;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = q_head->fsf_command;
	rec->fsf_seq_no = q_pref->req_seq_no;
	rec->u.res.req_issued = req->issued;
	rec->u.res.prot_status = q_pref->prot_status;
	rec->u.res.fsf_status = q_head->fsf_status;
	rec->u.res.port_handle = q_head->port_handle;
	rec->u.res.lun_handle = q_head->lun_handle;

	memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
	       FSF_PROT_STATUS_QUAL_SIZE);
	memcpy(rec->u.res.fsf_status_qual, &q_head->fsf_status_qual,
	       FSF_STATUS_QUALIFIER_SIZE);

	rec->pl_len = q_head->log_length;
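	/* dump the log area of the QTCB (log_start/log_length) as payload */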
	zfcp_dbf_pl_write(dbf, (char *)q_pref + q_head->log_start,
			  rec->pl_len, "fsf_res", req->req_id);

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_fsf_fces - trace event for fsf responses related to
 *			   FC Endpoint Security (FCES)
 * @tag: tag indicating which kind of FC Endpoint Security event has occurred
 * @req: request for which a response was received
 * @wwpn: remote port or ZFCP_DBF_INVALID_WWPN
 * @fc_security_old: old FC Endpoint Security of FCP device or connection
 * @fc_security_new: new FC Endpoint Security of FCP device or connection
 */
void zfcp_dbf_hba_fsf_fces(char *tag, const struct zfcp_fsf_req *req, u64 wwpn,
			   u32 fc_security_old, u32 fc_security_new)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
	struct fsf_qtcb_header *q_head = &req->qtcb->header;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	static int const level = 3;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_FCES;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = q_head->fsf_command;
	rec->fsf_seq_no = q_pref->req_seq_no;
	rec->u.fces.req_issued = req->issued;
	rec->u.fces.fsf_status = q_head->fsf_status;
	rec->u.fces.port_handle = q_head->port_handle;
	rec->u.fces.wwpn = wwpn;
	rec->u.fces.fc_security_old = fc_security_old;
	rec->u.fces.fc_security_new = fc_security_new;

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_fsf_uss - trace event for an unsolicited status buffer
 * @tag: tag indicating which kind of unsolicited status has been received
 * @req: request providing the unsolicited status
 */
void zfcp_dbf_hba_fsf_uss(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct fsf_status_read_buffer *srb = req->data;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	static int const level = 2;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_USS;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;

	if (!srb)
		goto log;

	rec->u.uss.status_type = srb->status_type;
	rec->u.uss.status_subtype = srb->status_subtype;
	rec->u.uss.d_id = ntoh24(srb->d_id);
	rec->u.uss.lun = srb->fcp_lun;
	memcpy(&rec->u.uss.queue_designator, &srb->queue_designator,
	       sizeof(rec->u.uss.queue_designator));

	/* status read buffer payload length */
	rec->pl_len = (!srb->length) ? 0 : srb->length -
			offsetof(struct fsf_status_read_buffer, payload);

	if (rec->pl_len)
		zfcp_dbf_pl_write(dbf, srb->payload.data, rec->pl_len,
				  "fsf_uss", req->req_id);
log:
	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_bit_err - trace event for bit error conditions
 * @tag: tag indicating which kind of bit error unsolicited status was received
 * @req: request which caused the bit_error condition
 */
void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
{
	struct zfcp_dbf *dbf = req->adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	struct fsf_status_read_buffer *sr_buf = req->data;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BIT;
	rec->fsf_req_id = req->req_id;
	rec->fsf_req_status = req->status;
	rec->fsf_cmd = FSF_QTCB_UNSOLICITED_STATUS;
	memcpy(&rec->u.be, &sr_buf->payload.bit_error,
	       sizeof(struct fsf_bit_error_payload));

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

/**
 * zfcp_dbf_hba_def_err - trace event for deferred error messages
 * @adapter: pointer to struct zfcp_adapter
 * @req_id: request id which caused the deferred error message
 * @scount: number of sbals incl. the signaling sbal
 * @pl: array of all involved sbals
 */
void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
			  void **pl)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	unsigned long flags;
	static int const level = 1;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->pay, level)))
		return;

	if (!pl)
		return;

	spin_lock_irqsave(&dbf->pay_lock, flags);
	memset(payload, 0, sizeof(*payload));

	memcpy(payload->area, "def_err", 7);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	length = min((u16)sizeof(struct qdio_buffer),
		     (u16)ZFCP_DBF_PAY_MAX_REC);

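	/* dump each of the up to scount SBALs as its own payload record */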
	while (payload->counter < scount && (char *)pl[payload->counter]) {
		memcpy(payload->data, (char *)pl[payload->counter], length);
		debug_event(dbf->pay, level, payload, zfcp_dbf_plen(length));
		payload->counter++;
	}

	spin_unlock_irqrestore(&dbf->pay_lock, flags);
}

/**
 * zfcp_dbf_hba_basic - trace event for basic adapter events
 * @tag: identifier for event
 * @adapter: pointer to struct zfcp_adapter
 */
void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_hba *rec = &dbf->hba_buf;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->hba, level)))
		return;

	spin_lock_irqsave(&dbf->hba_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_HBA_BASIC;

	debug_event(dbf->hba, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->hba_lock, flags);
}

static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
				struct zfcp_adapter *adapter,
				struct zfcp_port *port,
				struct scsi_device *sdev)
{
	rec->adapter_status = atomic_read(&adapter->status);
	if (port) {
		rec->port_status = atomic_read(&port->status);
		rec->wwpn = port->wwpn;
		rec->d_id = port->d_id;
	}
	if (sdev) {
		rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
		rec->lun = zfcp_scsi_dev_lun(sdev);
	} else
		rec->lun = ZFCP_DBF_INVALID_LUN;
}

/**
 * zfcp_dbf_rec_trig - trace event related to triggered recovery
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock has to be held.
 */
void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
		       struct zfcp_port *port, struct scsi_device *sdev,
		       u8 want, u8 need)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	static int const level = 1;
	struct list_head *entry;
	unsigned long flags;

	lockdep_assert_held(&adapter->erp_lock);

	if (unlikely(!debug_level_enabled(dbf->rec, level)))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_TRIG;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, adapter, port, sdev);

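	/* count queued and running recovery actions; caller holds erp_lock */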
	list_for_each(entry, &adapter->erp_ready_head)
		rec->u.trig.ready++;

	list_for_each(entry, &adapter->erp_running_head)
		rec->u.trig.running++;

	rec->u.trig.want = want;
	rec->u.trig.need = need;

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
 * @tag: identifier for event
 * @adapter: adapter on which the erp_action should run
 * @port: remote port involved in the erp_action
 * @sdev: scsi device involved in the erp_action
 * @want: wanted erp_action
 * @need: required erp_action
 *
 * The adapter->erp_lock must not be held.
 */
void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
			    struct zfcp_port *port, struct scsi_device *sdev,
			    u8 want, u8 need)
{
	unsigned long flags;

	read_lock_irqsave(&adapter->erp_lock, flags);
	zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
	read_unlock_irqrestore(&adapter->erp_lock, flags);
}

/**
 * zfcp_dbf_rec_run_lvl - trace event related to running recovery
 * @level: trace level to be used for event
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
	struct zfcp_dbf *dbf = erp->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	unsigned long flags;

	if (!debug_level_enabled(dbf->rec, level))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);

	rec->u.run.fsf_req_id = erp->fsf_req_id;
	rec->u.run.rec_status = erp->status;
	rec->u.run.rec_step = erp->step;
	rec->u.run.rec_action = erp->type;

	if (erp->sdev)
		rec->u.run.rec_count =
			atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
	else if (erp->port)
		rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
	else
		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

/**
 * zfcp_dbf_rec_run - trace event related to running recovery
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
{
	zfcp_dbf_rec_run_lvl(1, tag, erp);
}

/**
 * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
 * @tag: identifier for event
 * @wka_port: well known address port
 * @req_id: request ID to correlate with potential HBA trace record
 */
void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
			  u64 req_id)
{
	struct zfcp_dbf *dbf = wka_port->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	static int const level = 1;
	unsigned long flags;

	if (unlikely(!debug_level_enabled(dbf->rec, level)))
		return;

	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->port_status = wka_port->status;
	rec->d_id = wka_port->d_id;
	rec->lun = ZFCP_DBF_INVALID_LUN;

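	/* no erp_action context for WKA ports; mark the run fields invalid */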
	rec->u.run.fsf_req_id = req_id;
	rec->u.run.rec_status = ~0;
	rec->u.run.rec_step = ~0;
	rec->u.run.rec_action = ~0;
	rec->u.run.rec_count = ~0;

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}

#define ZFCP_DBF_SAN_LEVEL 1

static inline
void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
		  char *paytag, struct scatterlist *sg, u8 id, u16 len,
		  u64 req_id, u32 d_id, u16 cap_len)
{
	struct zfcp_dbf_san *rec = &dbf->san_buf;
	u16 rec_len;
	unsigned long flags;
	struct zfcp_dbf_pay *payload = &dbf->pay_buf;
	u16 pay_sum = 0;

	spin_lock_irqsave(&dbf->san_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = id;
	rec->fsf_req_id = req_id;
	rec->d_id = d_id;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->pl_len = len; /* full length even if we cap pay below */
	if (!sg)
		goto out;
	rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
	memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
	if (len <= rec_len)
		goto out; /* skip pay record if full content in rec->payload */

	/* if (len > rec_len):
	 * dump data up to cap_len ignoring small duplicate in rec->payload
	 */
	spin_lock(&dbf->pay_lock);
	memset(payload, 0, sizeof(*payload));
	memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
	payload->fsf_req_id = req_id;
	payload->counter = 0;
	for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
		u16 pay_len, offset = 0;

		while (offset < sg->length && pay_sum < cap_len) {
			pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
				      (u16)(sg->length - offset));
			/* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
			memcpy(payload->data, sg_virt(sg) + offset, pay_len);
			debug_event(dbf->pay, ZFCP_DBF_SAN_LEVEL, payload,
				    zfcp_dbf_plen(pay_len));
			payload->counter++;
			offset += pay_len;
			pay_sum += pay_len;
		}
	}
	spin_unlock(&dbf->pay_lock);

out:
	debug_event(dbf->san, ZFCP_DBF_SAN_LEVEL, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->san_lock, flags);
}

/**
 * zfcp_dbf_san_req - trace event for issued SAN request
 * @tag: identifier for event
 * @fsf: request containing issued CT or ELS data
 * @d_id: N_Port_ID where SAN request is sent to
 */
void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)zfcp_qdio_real_bytes(ct_els->req);
	zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
		     length, fsf->req_id, d_id, length);
}

static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
					      struct zfcp_fsf_req *fsf,
					      u16 len)
{
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
	struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
	struct scatterlist *resp_entry = ct_els->resp;
	struct fc_ct_hdr *resph;
	struct fc_gpn_ft_resp *acc;
	int max_entries, x, last = 0;

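	/* cap only GPN_FT responses: the request must be a well-formed GPN_FT
	 * CT_IU sent to the directory server
	 */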
	if (!(memcmp(tag, "fsscth2", 7) == 0
	      && ct_els->d_id == FC_FID_DIR_SERV
	      && reqh->ct_rev == FC_CT_REV
	      && reqh->ct_in_id[0] == 0
	      && reqh->ct_in_id[1] == 0
	      && reqh->ct_in_id[2] == 0
	      && reqh->ct_fs_type == FC_FST_DIR
	      && reqh->ct_fs_subtype == FC_NS_SUBTYPE
	      && reqh->ct_options == 0
	      && reqh->_ct_resvd1 == 0
	      && reqh->ct_cmd == cpu_to_be16(FC_NS_GPN_FT)
	      /* reqh->ct_mr_size can vary so do not match but read below */
	      && reqh->_ct_resvd2 == 0
	      && reqh->ct_reason == 0
	      && reqh->ct_explan == 0
	      && reqh->ct_vendor == 0
	      && reqn->fn_resvd == 0
	      && reqn->fn_domain_id_scope == 0
	      && reqn->fn_area_id_scope == 0
	      && reqn->fn_fc4_type == FC_TYPE_FCP))
		return len; /* not GPN_FT response so do not cap */

	acc = sg_virt(resp_entry);

	/* cap all but accept CT responses to at least the CT header */
	resph = (struct fc_ct_hdr *)acc;
	if ((ct_els->status) ||
	    (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
		return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);

	max_entries = (be16_to_cpu(reqh->ct_mr_size) * 4 /
		       sizeof(struct fc_gpn_ft_resp))
		+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
		     * to account for header as 1st pseudo "entry" */;

	/* the basic CT_IU preamble is the same size as one entry in the GPN_FT
	 * response, allowing us to skip special handling for it - just skip it
	 */
	for (x = 1; x < max_entries && !last; x++) {
		if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
			acc++;
		else
			acc = sg_virt(++resp_entry);

		last = acc->fp_flags & FC_NS_FID_LAST;
	}
	len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
	return len; /* cap after last entry */
}

/**
 * zfcp_dbf_san_res - trace event for received SAN response
 * @tag: identifier for event
 * @fsf: request containing received CT or ELS data
 */
void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct zfcp_fsf_ct_els *ct_els = fsf->data;
	u16 length;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
	zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
		     length, fsf->req_id, ct_els->d_id,
		     zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
}

/**
 * zfcp_dbf_san_in_els - trace event for incoming ELS
 * @tag: identifier for event
 * @fsf: request containing received ELS data
 */
void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
{
	struct zfcp_dbf *dbf = fsf->adapter->dbf;
	struct fsf_status_read_buffer *srb =
		(struct fsf_status_read_buffer *) fsf->data;
	u16 length;
	struct scatterlist sg;

	if (unlikely(!debug_level_enabled(dbf->san, ZFCP_DBF_SAN_LEVEL)))
		return;

	length = (u16)(srb->length -
			offsetof(struct fsf_status_read_buffer, payload));
	sg_init_one(&sg, srb->payload.data, length);
	zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
		     fsf->req_id, ntoh24(srb->d_id), length);
}

/**
 * zfcp_dbf_scsi_common() - Common trace event helper for scsi.
 * @tag: Identifier for event.
 * @level: trace level of event.
 * @sdev: Pointer to SCSI device as context for this event.
 * @sc: Pointer to SCSI command, or NULL with task management function (TMF).
 * @fsf: Pointer to FSF request, or NULL.
 */
void zfcp_dbf_scsi_common(char *tag, int level, struct scsi_device *sdev,
			  struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
{
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *) sdev->host->hostdata[0];
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
	struct fcp_resp_with_ext *fcp_rsp;
	struct fcp_resp_rsp_info *fcp_rsp_info;
	unsigned long flags;

	spin_lock_irqsave(&dbf->scsi_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_SCSI_CMND;
	if (sc) {
		rec->scsi_result = sc->result;
		rec->scsi_retries = sc->retries;
		rec->scsi_allowed = sc->allowed;
		rec->scsi_id = sc->device->id;
		rec->scsi_lun = (u32)sc->device->lun;
		rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
		rec->host_scribble = (unsigned long)sc->host_scribble;

		memcpy(rec->scsi_opcode, sc->cmnd,
		       min_t(int, sc->cmd_len, ZFCP_DBF_SCSI_OPCODE));
	} else {
		rec->scsi_result = ~0;
		rec->scsi_retries = ~0;
		rec->scsi_allowed = ~0;
		rec->scsi_id = sdev->id;
		rec->scsi_lun = (u32)sdev->lun;
		rec->scsi_lun_64_hi = (u32)(sdev->lun >> 32);
		rec->host_scribble = ~0;

		memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
	}

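	/* FCP_RSP details can only be traced if an FSF request is attached */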
	if (fsf) {
		rec->fsf_req_id = fsf->req_id;
		rec->pl_len = FCP_RESP_WITH_EXT;
		fcp_rsp = &(fsf->qtcb->bottom.io.fcp_rsp.iu);
		/* mandatory parts of FCP_RSP IU in this SCSI record */
		memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
			fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
			rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
		}
		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
		}
		/* complete FCP_RSP IU in associated PAYload record
		 * but only if there are optional parts
		 */
		if (fcp_rsp->resp.fr_flags != 0)
			zfcp_dbf_pl_write(
				dbf, fcp_rsp,
				/* at least one full PAY record
				 * but not beyond hardware response field
				 */
				min_t(u16, max_t(u16, rec->pl_len,
						 ZFCP_DBF_PAY_MAX_REC),
				      FSF_FCP_RSP_SIZE),
				"fcp_riu", fsf->req_id);
	}

	debug_event(dbf->scsi, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}

/**
 * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
 * @tag: Identifier for event.
 * @adapter: Pointer to zfcp adapter as context for this event.
 * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
 * @ret: Return value of calling function.
 *
 * This SCSI trace variant does not depend on any of:
 * scsi_cmnd, zfcp_fsf_req, scsi_device.
 */
void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
		      unsigned int scsi_id, int ret)
{
	struct zfcp_dbf *dbf = adapter->dbf;
	struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
	unsigned long flags;
	static int const level = 1;

	if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
		return;

	spin_lock_irqsave(&dbf->scsi_lock, flags);
	memset(rec, 0, sizeof(*rec));

	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	rec->id = ZFCP_DBF_SCSI_CMND;
	rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
	rec->scsi_retries = ~0;
	rec->scsi_allowed = ~0;
	rec->fcp_rsp_info = ~0;
	rec->scsi_id = scsi_id;
	rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
	rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
	rec->host_scribble = ~0;
	memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);

	debug_event(dbf->scsi, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}

static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
	struct debug_info *d;

	d = debug_register(name, size, 1, rec_size);
	if (!d)
		return NULL;

	debug_register_view(d, &debug_hex_ascii_view);
	debug_set_level(d, dbflevel);

	return d;
}

static void zfcp_dbf_unregister(struct zfcp_dbf *dbf)
{
	if (!dbf)
		return;

	debug_unregister(dbf->scsi);
	debug_unregister(dbf->san);
	debug_unregister(dbf->hba);
	debug_unregister(dbf->pay);
	debug_unregister(dbf->rec);
	kfree(dbf);
}

/**
 * zfcp_dbf_adapter_register - registers debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be registered
 * Return: -ENOMEM on error, 0 otherwise
 */
int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
{
	char name[DEBUG_MAX_NAME_LEN];
	struct zfcp_dbf *dbf;

	dbf = kzalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
	if (!dbf)
		return -ENOMEM;

	spin_lock_init(&dbf->pay_lock);
	spin_lock_init(&dbf->hba_lock);
	spin_lock_init(&dbf->san_lock);
	spin_lock_init(&dbf->scsi_lock);
	spin_lock_init(&dbf->rec_lock);

	/* debug feature area which records recovery activity */
	sprintf(name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
	dbf->rec = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_rec));
	if (!dbf->rec)
		goto err_out;

	/* debug feature area which records HBA (FSF and QDIO) conditions */
	sprintf(name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
	dbf->hba = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_hba));
	if (!dbf->hba)
		goto err_out;

	/* debug feature area which records payload info */
	sprintf(name, "zfcp_%s_pay", dev_name(&adapter->ccw_device->dev));
	dbf->pay = zfcp_dbf_reg(name, dbfsize * 2, sizeof(struct zfcp_dbf_pay));
	if (!dbf->pay)
		goto err_out;

	/* debug feature area which records SAN command failures and recovery */
	sprintf(name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
	dbf->san = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_san));
	if (!dbf->san)
		goto err_out;

	/* debug feature area which records SCSI command failures and recovery */
	sprintf(name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
	dbf->scsi = zfcp_dbf_reg(name, dbfsize, sizeof(struct zfcp_dbf_scsi));
	if (!dbf->scsi)
		goto err_out;

	adapter->dbf = dbf;

	return 0;
err_out:
	zfcp_dbf_unregister(dbf);
	return -ENOMEM;
}

/**
 * zfcp_dbf_adapter_unregister - unregisters debug feature for an adapter
 * @adapter: pointer to adapter for which debug features should be unregistered
 */
void zfcp_dbf_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct zfcp_dbf *dbf = adapter->dbf;

	adapter->dbf = NULL;
	zfcp_dbf_unregister(dbf);
}