// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
 * Copyright(c) 2008 Mike Christie
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * Fibre Channel exchange and sequence handling.
 */
13*4882a593Smuzhiyun
14*4882a593Smuzhiyun #include <linux/timer.h>
15*4882a593Smuzhiyun #include <linux/slab.h>
16*4882a593Smuzhiyun #include <linux/err.h>
17*4882a593Smuzhiyun #include <linux/export.h>
18*4882a593Smuzhiyun #include <linux/log2.h>
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun #include <scsi/fc/fc_fc2.h>
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun #include <scsi/libfc.h>
23*4882a593Smuzhiyun #include <scsi/fc_encode.h>
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun #include "fc_libfc.h"
26*4882a593Smuzhiyun
u16 fc_cpu_mask;		/* cpu mask for possible cpus */
EXPORT_SYMBOL(fc_cpu_mask);
static u16 fc_cpu_order;	/* 2's power to represent total possible cpus */
static struct kmem_cache *fc_em_cachep;	       /* cache for exchanges */
static struct workqueue_struct *fc_exch_workqueue; /* runs exchange timers */
32*4882a593Smuzhiyun
/*
 * Structure and function definitions for managing Fibre Channel Exchanges
 * and Sequences.
 *
 * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
 *
 * fc_exch_mgr holds the exchange state for an N port.
 *
 * fc_exch holds state for one exchange and links to its active sequence.
 *
 * fc_seq holds the state for an individual sequence.
 */
45*4882a593Smuzhiyun
/**
 * struct fc_exch_pool - Per cpu exchange pool
 * @lock:	  Exch pool lock; when both are needed, this lock must be
 *		  taken before any exchange's ex_lock (see the locking
 *		  notes in this file)
 * @ex_list:	  List of exchanges in this pool
 * @next_index:	  Next possible free exchange index
 * @total_exches: Total allocated exchanges
 * @left:	  Cache of free slot in exch array
 * @right:	  Cache of free slot in exch array
 *
 * This structure manages per cpu exchanges in array of exchange pointers.
 * This array is allocated followed by struct fc_exch_pool memory for
 * assigned range of exchanges to per cpu pool.
 */
struct fc_exch_pool {
	spinlock_t	 lock;
	struct list_head ex_list;
	u16		 next_index;
	u16		 total_exches;

	u16		 left;
	u16		 right;
} ____cacheline_aligned_in_smp;
68*4882a593Smuzhiyun
/**
 * struct fc_exch_mgr - The Exchange Manager (EM).
 * @pool:	    Per cpu exch pool
 * @ep_pool:	    Reserved exchange pointers (mempool backing struct fc_exch)
 * @lport:	    Local exchange port
 * @class:	    Default class for new sequences
 * @kref:	    Reference counter
 * @min_xid:	    Minimum exchange ID
 * @max_xid:	    Maximum exchange ID
 * @pool_max_index: Max exch array index in exch pool
 * @stats:	    Statistics structure
 *
 * This structure is the center for creating exchanges and sequences.
 * It manages the allocation of exchange IDs.
 */
struct fc_exch_mgr {
	struct fc_exch_pool __percpu *pool;
	mempool_t	*ep_pool;
	struct fc_lport	*lport;
	enum fc_class	class;
	struct kref	kref;
	u16		min_xid;
	u16		max_xid;
	u16		pool_max_index;

	/* Error/exception counters; atomic_t so they can be updated
	 * without additional locking.
	 */
	struct {
		atomic_t no_free_exch;
		atomic_t no_free_exch_xid;
		atomic_t xid_not_found;
		atomic_t xid_busy;
		atomic_t seq_not_found;
		atomic_t non_bls_resp;
	} stats;
};
103*4882a593Smuzhiyun
/**
 * struct fc_exch_mgr_anchor - primary structure for list of EMs
 * @ema_list: Exchange Manager Anchor list
 * @mp:	      Exchange Manager associated with this anchor
 * @match:    Routine to determine if this anchor's EM should be used
 *
 * When walking the list of anchors the match routine will be called
 * for each anchor to determine if that EM should be used. The last
 * anchor in the list will always match to handle any exchanges not
 * handled by other EMs. The non-default EMs would be added to the
 * anchor list by HW that provides offloads.
 */
struct fc_exch_mgr_anchor {
	struct list_head ema_list;
	struct fc_exch_mgr *mp;
	bool (*match)(struct fc_frame *);
};
121*4882a593Smuzhiyun
122*4882a593Smuzhiyun static void fc_exch_rrq(struct fc_exch *);
123*4882a593Smuzhiyun static void fc_seq_ls_acc(struct fc_frame *);
124*4882a593Smuzhiyun static void fc_seq_ls_rjt(struct fc_frame *, enum fc_els_rjt_reason,
125*4882a593Smuzhiyun enum fc_els_rjt_explan);
126*4882a593Smuzhiyun static void fc_exch_els_rec(struct fc_frame *);
127*4882a593Smuzhiyun static void fc_exch_els_rrq(struct fc_frame *);
128*4882a593Smuzhiyun
/*
 * Internal implementation notes.
 *
 * The exchange manager is one by default in libfc but LLD may choose
 * to have one per CPU. The sequence manager is one per exchange manager
 * and currently never separated.
 *
 * Section 9.8 in FC-FS-2 specifies: "The SEQ_ID is a one-byte field
 * assigned by the Sequence Initiator that shall be unique for a specific
 * D_ID and S_ID pair while the Sequence is open." Note that it isn't
 * qualified by exchange ID, which one might think it would be.
 * In practice this limits the number of open sequences and exchanges to 256
 * per session. For most targets we could treat this limit as per exchange.
 *
 * The exchange and its sequence are freed when the last sequence is received.
 * It's possible for the remote port to leave an exchange open without
 * sending any sequences.
 *
 * Notes on reference counts:
 *
 * Exchanges are reference counted and exchange gets freed when the reference
 * count becomes zero.
 *
 * Timeouts:
 * Sequences are timed out for E_D_TOV and R_A_TOV.
 *
 * Sequence event handling:
 *
 * The following events may occur on initiator sequences:
 *
 * Send.
 *	For now, the whole thing is sent.
 * Receive ACK
 *	This applies only to class F.
 *	The sequence is marked complete.
 * ULP completion.
 *	The upper layer calls fc_exch_done() when done
 *	with exchange and sequence tuple.
 * RX-inferred completion.
 *	When we receive the next sequence on the same exchange, we can
 *	retire the previous sequence ID. (XXX not implemented).
 * Timeout.
 *	R_A_TOV frees the sequence ID. If we're waiting for ACK,
 *	E_D_TOV causes abort and calls upper layer response handler
 *	with FC_EX_TIMEOUT error.
 * Receive RJT
 *	XXX defer.
 * Send ABTS
 *	On timeout.
 *
 * The following events may occur on recipient sequences:
 *
 * Receive
 *	Allocate sequence for first frame received.
 *	Hold during receive handler.
 *	Release when final frame received.
 *	Keep status of last N of these for the ELS RES command. XXX TBD.
 * Receive ABTS
 *	Deallocate sequence
 * Send RJT
 *	Deallocate
 *
 * For now, we neglect conditions where only part of a sequence was
 * received or transmitted, or where out-of-order receipt is detected.
 */

/*
 * Locking notes:
 *
 * The EM code runs in a per-CPU worker thread.
 *
 * To protect against concurrency between a worker thread code and timers,
 * sequence allocation and deallocation must be locked.
 *  - exchange refcnt can be done atomically without locks.
 *  - sequence allocation must be locked by exch lock.
 *  - If the EM pool lock and ex_lock must be taken at the same time, then the
 *    EM pool lock must be taken before the ex_lock.
 */
207*4882a593Smuzhiyun
/*
 * R_CTL opcode names for debugging, indexed by opcode value.
 */
static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
212*4882a593Smuzhiyun
/**
 * fc_exch_name_lookup() - Map an opcode to a human-readable name
 * @op:	       Opcode to be looked up
 * @table:     Opcode/name table
 * @max_index: Index not to be exceeded
 *
 * This routine is used to determine a human-readable string identifying
 * a R_CTL opcode. Returns "unknown" when @op is out of range or the
 * table entry is NULL.
 */
static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
					      unsigned int max_index)
{
	if (op >= max_index || !table[op])
		return "unknown";
	return table[op];
}
233*4882a593Smuzhiyun
/**
 * fc_exch_rctl_name() - Wrapper routine for fc_exch_name_lookup()
 * @op: The R_CTL opcode to be looked up
 *
 * Returns a human-readable name for @op from fc_exch_rctl_names,
 * or "unknown" if the opcode has no entry.
 */
static const char *fc_exch_rctl_name(unsigned int op)
{
	return fc_exch_name_lookup(op, fc_exch_rctl_names,
				   ARRAY_SIZE(fc_exch_rctl_names));
}
243*4882a593Smuzhiyun
/**
 * fc_exch_hold() - Increment an exchange's reference count
 * @ep: Exchange to be held
 *
 * The matching drop is fc_exch_release().
 */
static inline void fc_exch_hold(struct fc_exch *ep)
{
	atomic_inc(&ep->ex_refcnt);
}
252*4882a593Smuzhiyun
/**
 * fc_exch_setup_hdr() - Initialize a FC header by initializing some fields
 *			 and determine SOF and EOF.
 * @ep:	   The exchange that will use the header
 * @fp:	   The frame whose header is to be modified
 * @f_ctl: F_CTL bits that will be used for the frame header
 *
 * The fields initialized by this routine are: fh_ox_id, fh_rx_id,
 * fh_seq_id, fh_seq_cnt and the SOF and EOF.
 */
static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
			      u32 f_ctl)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	u16 fill;

	/*
	 * The first frame of a sequence (seq.cnt == 0) carries the raw
	 * class SOF; subsequent frames use the "normal" SOF for the class.
	 */
	fr_sof(fp) = ep->class;
	if (ep->seq.cnt)
		fr_sof(fp) = fc_sof_normal(ep->class);

	if (f_ctl & FC_FC_END_SEQ) {
		fr_eof(fp) = FC_EOF_T;
		if (fc_sof_needs_ack(ep->class))
			fr_eof(fp) = FC_EOF_N;
		/*
		 * From F_CTL.
		 * The number of fill bytes to make the length a 4-byte
		 * multiple is the low order 2-bits of the f_ctl.
		 * The fill itself will have been cleared by the frame
		 * allocation.
		 * After this, the length will be even, as expected by
		 * the transport.
		 */
		fill = fr_len(fp) & 3;
		if (fill) {
			fill = 4 - fill;
			/* TODO, this may be a problem with fragmented skb */
			skb_put(fp_skb(fp), fill);
			hton24(fh->fh_f_ctl, f_ctl | fill);
		}
	} else {
		WARN_ON(fr_len(fp) % 4 != 0);	/* no pad to non last frame */
		fr_eof(fp) = FC_EOF_N;
	}

	/* Initialize remaining fh fields from fc_fill_fc_hdr */
	fh->fh_ox_id = htons(ep->oxid);
	fh->fh_rx_id = htons(ep->rxid);
	fh->fh_seq_id = ep->seq.id;
	fh->fh_seq_cnt = htons(ep->seq.cnt);
}
304*4882a593Smuzhiyun
305*4882a593Smuzhiyun /**
306*4882a593Smuzhiyun * fc_exch_release() - Decrement an exchange's reference count
307*4882a593Smuzhiyun * @ep: Exchange to be released
308*4882a593Smuzhiyun *
309*4882a593Smuzhiyun * If the reference count reaches zero and the exchange is complete,
310*4882a593Smuzhiyun * it is freed.
311*4882a593Smuzhiyun */
fc_exch_release(struct fc_exch * ep)312*4882a593Smuzhiyun static void fc_exch_release(struct fc_exch *ep)
313*4882a593Smuzhiyun {
314*4882a593Smuzhiyun struct fc_exch_mgr *mp;
315*4882a593Smuzhiyun
316*4882a593Smuzhiyun if (atomic_dec_and_test(&ep->ex_refcnt)) {
317*4882a593Smuzhiyun mp = ep->em;
318*4882a593Smuzhiyun if (ep->destructor)
319*4882a593Smuzhiyun ep->destructor(&ep->seq, ep->arg);
320*4882a593Smuzhiyun WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
321*4882a593Smuzhiyun mempool_free(ep, mp->ep_pool);
322*4882a593Smuzhiyun }
323*4882a593Smuzhiyun }
324*4882a593Smuzhiyun
/**
 * fc_exch_timer_cancel() - cancel exch timer
 * @ep: The exchange whose timer is to be canceled
 *
 * If the delayed work was still pending, the reference taken for the
 * timer in fc_exch_timer_set_locked() is dropped here.
 */
static inline void fc_exch_timer_cancel(struct fc_exch *ep)
{
	if (cancel_delayed_work(&ep->timeout_work)) {
		FC_EXCH_DBG(ep, "Exchange timer canceled\n");
		atomic_dec(&ep->ex_refcnt);	/* drop hold for timer */
	}
}
336*4882a593Smuzhiyun
/**
 * fc_exch_timer_set_locked() - Start a timer for an exchange w/ the
 *				exchange lock held
 * @ep:		The exchange whose timer will start
 * @timer_msec: The timeout period
 *
 * Used for upper level protocols to time out the exchange.
 * The timer is cancelled when it fires or when the exchange completes.
 * Does nothing if the exchange is already done or being reset.
 */
static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
					    unsigned int timer_msec)
{
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
		return;

	FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);

	fc_exch_hold(ep);		/* hold for timer */
	if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
				msecs_to_jiffies(timer_msec))) {
		FC_EXCH_DBG(ep, "Exchange already queued\n");
		/* work was already pending: drop the hold we just took */
		fc_exch_release(ep);
	}
}
361*4882a593Smuzhiyun
/**
 * fc_exch_timer_set() - Lock the exchange and set the timer
 * @ep:		The exchange whose timer will start
 * @timer_msec: The timeout period
 *
 * Locking wrapper around fc_exch_timer_set_locked().
 */
static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
{
	spin_lock_bh(&ep->ex_lock);
	fc_exch_timer_set_locked(ep, timer_msec);
	spin_unlock_bh(&ep->ex_lock);
}
373*4882a593Smuzhiyun
/**
 * fc_exch_done_locked() - Complete an exchange with the exchange lock held
 * @ep: The exchange that is complete
 *
 * Returns 0 when the exchange transitioned to FC_EX_DONE (timer
 * cancelled, safe to delete); returns 1 when it was already done or is
 * still held by a recovery qualifier (ESB_ST_REC_QUAL).
 *
 * Note: May sleep if invoked from outside a response handler.
 */
static int fc_exch_done_locked(struct fc_exch *ep)
{
	int rc = 1;

	/*
	 * We must check for completion in case there are two threads
	 * trying to complete this. But the rrq code will reuse the
	 * ep, and in that case we only clear the resp and set it as
	 * complete, so it can be reused by the timer to send the rrq.
	 */
	if (ep->state & FC_EX_DONE)
		return rc;
	ep->esb_stat |= ESB_ST_COMPLETE;

	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
		ep->state |= FC_EX_DONE;
		fc_exch_timer_cancel(ep);
		rc = 0;
	}
	return rc;
}
401*4882a593Smuzhiyun
/* Placeholder stored in a pool slot that must not be handed out again
 * (see the FC_EX_QUARANTINE handling in fc_exch_delete()).
 */
static struct fc_exch fc_quarantine_exch;
403*4882a593Smuzhiyun
404*4882a593Smuzhiyun /**
405*4882a593Smuzhiyun * fc_exch_ptr_get() - Return an exchange from an exchange pool
406*4882a593Smuzhiyun * @pool: Exchange Pool to get an exchange from
407*4882a593Smuzhiyun * @index: Index of the exchange within the pool
408*4882a593Smuzhiyun *
409*4882a593Smuzhiyun * Use the index to get an exchange from within an exchange pool. exches
410*4882a593Smuzhiyun * will point to an array of exchange pointers. The index will select
411*4882a593Smuzhiyun * the exchange within the array.
412*4882a593Smuzhiyun */
fc_exch_ptr_get(struct fc_exch_pool * pool,u16 index)413*4882a593Smuzhiyun static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
414*4882a593Smuzhiyun u16 index)
415*4882a593Smuzhiyun {
416*4882a593Smuzhiyun struct fc_exch **exches = (struct fc_exch **)(pool + 1);
417*4882a593Smuzhiyun return exches[index];
418*4882a593Smuzhiyun }
419*4882a593Smuzhiyun
420*4882a593Smuzhiyun /**
421*4882a593Smuzhiyun * fc_exch_ptr_set() - Assign an exchange to a slot in an exchange pool
422*4882a593Smuzhiyun * @pool: The pool to assign the exchange to
423*4882a593Smuzhiyun * @index: The index in the pool where the exchange will be assigned
424*4882a593Smuzhiyun * @ep: The exchange to assign to the pool
425*4882a593Smuzhiyun */
fc_exch_ptr_set(struct fc_exch_pool * pool,u16 index,struct fc_exch * ep)426*4882a593Smuzhiyun static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
427*4882a593Smuzhiyun struct fc_exch *ep)
428*4882a593Smuzhiyun {
429*4882a593Smuzhiyun ((struct fc_exch **)(pool + 1))[index] = ep;
430*4882a593Smuzhiyun }
431*4882a593Smuzhiyun
/**
 * fc_exch_delete() - Delete an exchange
 * @ep: The exchange to be deleted
 *
 * Removes the exchange from its per-cpu pool, records the freed slot in
 * the pool's free-slot cache (or leaves the slot occupied by the
 * quarantine placeholder when FC_EX_QUARANTINE is set, so it is not
 * handed out as free), and drops the pool's hold on the exchange.
 */
static void fc_exch_delete(struct fc_exch *ep)
{
	struct fc_exch_pool *pool;
	u16 index;

	pool = ep->pool;
	spin_lock_bh(&pool->lock);
	WARN_ON(pool->total_exches <= 0);
	pool->total_exches--;

	/* update cache of free slot */
	index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
	if (!(ep->state & FC_EX_QUARANTINE)) {
		if (pool->left == FC_XID_UNKNOWN)
			pool->left = index;
		else if (pool->right == FC_XID_UNKNOWN)
			pool->right = index;
		else
			pool->next_index = index;
		fc_exch_ptr_set(pool, index, NULL);
	} else {
		/* keep the slot non-NULL so it is never treated as free */
		fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
	}
	list_del(&ep->ex_list);
	spin_unlock_bh(&pool->lock);
	fc_exch_release(ep);	/* drop hold for exch in mp */
}
463*4882a593Smuzhiyun
/**
 * fc_seq_send_locked() - Send a frame using an existing sequence/exchange pair
 * @lport: The local port that the frame will be sent on
 * @sp:	   The sequence to be sent
 * @fp:	   The frame to be sent on the sequence
 *
 * Locking notes: Called with the exchange lock held.
 *
 * Returns -ENXIO (after freeing the frame) if the exchange is already
 * complete or abnormal; otherwise returns the result of the LLD's
 * frame_send().
 */
static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
			      struct fc_frame *fp)
{
	struct fc_exch *ep;
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	int error = -ENXIO;
	u32 f_ctl;
	u8 fh_type = fh->fh_type;

	ep = fc_seq_exch(sp);

	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) {
		fc_frame_free(fp);
		goto out;
	}

	WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));

	f_ctl = ntoh24(fh->fh_f_ctl);
	fc_exch_setup_hdr(ep, fp, f_ctl);
	fr_encaps(fp) = ep->encaps;

	/*
	 * update sequence count if this frame is carrying
	 * multiple FC frames when sequence offload is enabled
	 * by LLD.
	 */
	if (fr_max_payload(fp))
		sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
					fr_max_payload(fp));
	else
		sp->cnt++;

	/*
	 * Send the frame.
	 */
	error = lport->tt.frame_send(lport, fp);

	/* BLS frames (e.g. ABTS) don't change sequence/exchange state */
	if (fh_type == FC_TYPE_BLS)
		goto out;

	/*
	 * Update the exchange and sequence flags,
	 * assuming all frames for the sequence have been sent.
	 * We can only be called to send once for each sequence.
	 */
	ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;	/* not first seq */
	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
out:
	return error;
}
516*4882a593Smuzhiyun
/**
 * fc_seq_send() - Send a frame using existing sequence/exchange pair
 * @lport: The local port that the exchange will be sent on
 * @sp:	   The sequence to be sent
 * @fp:	   The frame to be sent on the exchange
 *
 * Locking wrapper around fc_seq_send_locked().
 *
 * Note: The frame will be freed either by a direct call to fc_frame_free(fp)
 * or indirectly by calling libfc_function_template.frame_send().
 */
int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp)
{
	struct fc_exch *ep;
	int error;

	ep = fc_seq_exch(sp);
	spin_lock_bh(&ep->ex_lock);
	error = fc_seq_send_locked(lport, sp, fp);
	spin_unlock_bh(&ep->ex_lock);
	return error;
}
EXPORT_SYMBOL(fc_seq_send);
537*4882a593Smuzhiyun
/**
 * fc_seq_alloc() - Allocate a sequence for a given exchange
 * @ep:	    The exchange to allocate a new sequence for
 * @seq_id: The sequence ID to be used
 *
 * We don't support multiple originated sequences on the same exchange.
 * By implication, any previously originated sequence on this exchange
 * is complete, and we reallocate the same sequence (the embedded
 * ep->seq), resetting its status and frame count.
 */
static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
{
	struct fc_seq *sp;

	sp = &ep->seq;
	sp->ssb_stat = 0;
	sp->cnt = 0;
	sp->id = seq_id;
	return sp;
}
557*4882a593Smuzhiyun
/**
 * fc_seq_start_next_locked() - Allocate a new sequence on the same
 *				exchange as the supplied sequence
 * @sp: The sequence/exchange to get a new sequence for
 *
 * Locking notes: Called with the exchange lock held.
 * Consumes the exchange's next sequence ID (ep->seq_id is post-incremented).
 */
static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	sp = fc_seq_alloc(ep, ep->seq_id++);
	FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
		    ep->f_ctl, sp->id);
	return sp;
}
572*4882a593Smuzhiyun
/**
 * fc_seq_start_next() - Lock the exchange and get a new sequence
 *			 for a given sequence/exchange pair
 * @sp: The sequence/exchange to get a new sequence for
 *
 * Locking wrapper around fc_seq_start_next_locked().
 */
struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	spin_lock_bh(&ep->ex_lock);
	sp = fc_seq_start_next_locked(sp);
	spin_unlock_bh(&ep->ex_lock);

	return sp;
}
EXPORT_SYMBOL(fc_seq_start_next);
589*4882a593Smuzhiyun
/**
 * fc_seq_set_resp() - Set the response handler for the exchange of a sequence
 * @sp:	  The sequence whose exchange's response handler is set
 * @resp: The response handler routine
 * @arg:  The argument passed to the response handler
 *
 * Waits uninterruptibly until no response handler is active on another
 * task before installing the new handler, hence:
 *
 * Note: May sleep if invoked from outside a response handler.
 */
void fc_seq_set_resp(struct fc_seq *sp,
		     void (*resp)(struct fc_seq *, struct fc_frame *, void *),
		     void *arg)
{
	struct fc_exch *ep = fc_seq_exch(sp);
	DEFINE_WAIT(wait);

	spin_lock_bh(&ep->ex_lock);
	/* A handler running on the current task is allowed to re-arm itself */
	while (ep->resp_active && ep->resp_task != current) {
		prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&ep->ex_lock);

		schedule();

		spin_lock_bh(&ep->ex_lock);
	}
	finish_wait(&ep->resp_wq, &wait);
	ep->resp = resp;
	ep->arg = arg;
	spin_unlock_bh(&ep->ex_lock);
}
EXPORT_SYMBOL(fc_seq_set_resp);
617*4882a593Smuzhiyun
/**
 * fc_exch_abort_locked() - Abort an exchange
 * @ep:		The exchange to be aborted
 * @timer_msec: The period of time to wait before aborting
 *
 * Abort an exchange and sequence. Generally called because of an
 * exchange timeout or an abort from the upper layer.
 *
 * A timer_msec can be specified for abort timeout; if a non-zero
 * timer_msec value is specified then the exchange resp handler
 * will be called with timeout error if there is no response to the abort.
 *
 * Locking notes: Called with exch lock held
 *
 * Return value: 0 on success else error code
 */
static int fc_exch_abort_locked(struct fc_exch *ep,
				unsigned int timer_msec)
{
	struct fc_seq *sp;
	struct fc_frame *fp;
	int error;

	FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec);
	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
		FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n",
			    ep->esb_stat, ep->state);
		return -ENXIO;
	}

	/*
	 * Send the abort on a new sequence if possible.
	 */
	sp = fc_seq_start_next_locked(&ep->seq);
	if (!sp)
		return -ENOMEM;

	if (timer_msec)
		fc_exch_timer_set_locked(ep, timer_msec);

	if (ep->sid) {
		/*
		 * Send an abort for the sequence that timed out.
		 */
		fp = fc_frame_alloc(ep->lp, 0);
		if (fp) {
			ep->esb_stat |= ESB_ST_SEQ_INIT;
			fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
				       FC_TYPE_BLS, FC_FC_END_SEQ |
				       FC_FC_SEQ_INIT, 0);
			error = fc_seq_send_locked(ep->lp, sp, fp);
		} else {
			error = -ENOBUFS;
		}
	} else {
		/*
		 * If not logged into the fabric, don't send ABTS but leave
		 * sequence active until next timeout.
		 */
		error = 0;
	}
	/* mark abnormal in all cases so the exchange can no longer send */
	ep->esb_stat |= ESB_ST_ABNORMAL;
	return error;
}
683*4882a593Smuzhiyun
684*4882a593Smuzhiyun /**
685*4882a593Smuzhiyun * fc_seq_exch_abort() - Abort an exchange and sequence
686*4882a593Smuzhiyun * @req_sp: The sequence to be aborted
687*4882a593Smuzhiyun * @timer_msec: The period of time to wait before aborting
688*4882a593Smuzhiyun *
689*4882a593Smuzhiyun * Generally called because of a timeout or an abort from the upper layer.
690*4882a593Smuzhiyun *
691*4882a593Smuzhiyun * Return value: 0 on success else error code
692*4882a593Smuzhiyun */
fc_seq_exch_abort(const struct fc_seq * req_sp,unsigned int timer_msec)693*4882a593Smuzhiyun int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
694*4882a593Smuzhiyun {
695*4882a593Smuzhiyun struct fc_exch *ep;
696*4882a593Smuzhiyun int error;
697*4882a593Smuzhiyun
698*4882a593Smuzhiyun ep = fc_seq_exch(req_sp);
699*4882a593Smuzhiyun spin_lock_bh(&ep->ex_lock);
700*4882a593Smuzhiyun error = fc_exch_abort_locked(ep, timer_msec);
701*4882a593Smuzhiyun spin_unlock_bh(&ep->ex_lock);
702*4882a593Smuzhiyun return error;
703*4882a593Smuzhiyun }
704*4882a593Smuzhiyun
/**
 * fc_invoke_resp() - invoke ep->resp()
 * @ep: The exchange to be operated on
 * @fp: The frame pointer to pass through to ->resp()
 * @sp: The sequence pointer to pass through to ->resp()
 *
 * Notes:
 * It is assumed that after initialization finished (this means the
 * first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are
 * modified only via fc_seq_set_resp(). This guarantees that none of these
 * two variables changes if ep->resp_active > 0.
 *
 * If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when
 * this function is invoked, the first spin_lock_bh() call in this function
 * will wait until fc_seq_set_resp() has finished modifying these variables.
 *
 * Since fc_exch_done() invokes fc_seq_set_resp() it is guaranteed that that
 * ep->resp() won't be invoked after fc_exch_done() has returned.
 *
 * The response handler itself may invoke fc_exch_done(), which will clear the
 * ep->resp pointer.
 *
 * Return value:
 * Returns true if and only if ep->resp has been invoked.
 */
static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
			   struct fc_frame *fp)
{
	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
	void *arg;
	bool res = false;

	spin_lock_bh(&ep->ex_lock);
	/*
	 * Non-zero resp_active makes fc_seq_set_resp() callers from other
	 * tasks wait on resp_wq until the handler invocation finishes.
	 */
	ep->resp_active++;
	/*
	 * Track which task is inside ->resp(); NULL when more than one
	 * task is active so fc_seq_set_resp() only skips waiting for
	 * recursion from the single current handler task.
	 */
	if (ep->resp_task != current)
		ep->resp_task = !ep->resp_task ? current : NULL;
	/* Snapshot handler and argument under the lock. */
	resp = ep->resp;
	arg = ep->arg;
	spin_unlock_bh(&ep->ex_lock);

	if (resp) {
		/* Invoke the handler without ex_lock held. */
		resp(sp, fp, arg);
		res = true;
	}

	spin_lock_bh(&ep->ex_lock);
	if (--ep->resp_active == 0)
		ep->resp_task = NULL;
	spin_unlock_bh(&ep->ex_lock);

	/* Wake fc_seq_set_resp() callers waiting for handlers to drain. */
	if (ep->resp_active == 0)
		wake_up(&ep->resp_wq);

	return res;
}
760*4882a593Smuzhiyun
/**
 * fc_exch_timeout() - Handle exchange timer expiration
 * @work: The work_struct identifying the exchange that timed out
 *
 * Runs from the exchange workqueue. Depending on the exchange state it
 * either sends an RRQ for a completed exchange that still holds recovery
 * qualifier, or reports a timeout to the response handler and starts an
 * abort of the exchange.
 */
static void fc_exch_timeout(struct work_struct *work)
{
	struct fc_exch *ep = container_of(work, struct fc_exch,
					  timeout_work.work);
	struct fc_seq *sp = &ep->seq;
	u32 e_stat;
	int rc = 1;

	FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state);

	spin_lock_bh(&ep->ex_lock);
	/* Nothing to do if the exchange is already done or being reset. */
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
		goto unlock;

	e_stat = ep->esb_stat;
	if (e_stat & ESB_ST_COMPLETE) {
		/* Completed exchange: clear and, if held, release the
		 * recovery qualifier by sending an RRQ. */
		ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
		spin_unlock_bh(&ep->ex_lock);
		if (e_stat & ESB_ST_REC_QUAL)
			fc_exch_rrq(ep);
		goto done;
	} else {
		/* If already abnormal (abort in progress), finish the
		 * exchange now. */
		if (e_stat & ESB_ST_ABNORMAL)
			rc = fc_exch_done_locked(ep);
		spin_unlock_bh(&ep->ex_lock);
		if (!rc)
			fc_exch_delete(ep);
		/* Tell the upper layer the exchange timed out, then clear
		 * the handler and abort the exchange. */
		fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT));
		fc_seq_set_resp(sp, NULL, ep->arg);
		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
		goto done;
	}
unlock:
	spin_unlock_bh(&ep->ex_lock);
done:
	/*
	 * This release matches the hold taken when the timer was set.
	 */
	fc_exch_release(ep);
}
805*4882a593Smuzhiyun
/**
 * fc_exch_em_alloc() - Allocate an exchange from a specified EM.
 * @lport: The local port that the exchange is for
 * @mp:	   The exchange manager that will allocate the exchange
 *
 * Picks a free XID slot from the current CPU's pool — first from the
 * two-entry free-slot cache (left/right), then by a circular scan
 * starting at next_index — and initializes the new exchange.
 *
 * Returns pointer to allocated fc_exch with exch lock held, or NULL on
 * failure.
 */
static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
					struct fc_exch_mgr *mp)
{
	struct fc_exch *ep;
	unsigned int cpu;
	u16 index;
	struct fc_exch_pool *pool;

	/* allocate memory for exchange */
	ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
	if (!ep) {
		atomic_inc(&mp->stats.no_free_exch);
		goto out;
	}
	memset(ep, 0, sizeof(*ep));

	/* Use this CPU's pool; the pool lock (not preemption-disable)
	 * protects the slot table, so put_cpu() can follow immediately. */
	cpu = get_cpu();
	pool = per_cpu_ptr(mp->pool, cpu);
	spin_lock_bh(&pool->lock);
	put_cpu();

	/* peek cache of free slot */
	if (pool->left != FC_XID_UNKNOWN) {
		/* Cached slot must be empty; WARN and fall through if not. */
		if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) {
			index = pool->left;
			pool->left = FC_XID_UNKNOWN;
			goto hit;
		}
	}
	if (pool->right != FC_XID_UNKNOWN) {
		if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) {
			index = pool->right;
			pool->right = FC_XID_UNKNOWN;
			goto hit;
		}
	}

	index = pool->next_index;
	/* allocate new exch from pool; scan wraps at pool_max_index and
	 * gives up after a full cycle back to next_index. */
	while (fc_exch_ptr_get(pool, index)) {
		index = index == mp->pool_max_index ? 0 : index + 1;
		if (index == pool->next_index)
			goto err;
	}
	pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
hit:
	fc_exch_hold(ep);	/* hold for exch in mp */
	spin_lock_init(&ep->ex_lock);
	/*
	 * Hold exch lock for caller to prevent fc_exch_reset()
	 * from releasing exch while fc_exch_alloc() caller is
	 * still working on exch.
	 */
	spin_lock_bh(&ep->ex_lock);

	fc_exch_ptr_set(pool, index, ep);
	list_add_tail(&ep->ex_list, &pool->ex_list);
	fc_seq_alloc(ep, ep->seq_id++);
	pool->total_exches++;
	spin_unlock_bh(&pool->lock);

	/*
	 * update exchange
	 */
	/* XID encodes the CPU in the low fc_cpu_order bits. */
	ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
	ep->em = mp;
	ep->pool = pool;
	ep->lp = lport;
	ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
	ep->rxid = FC_XID_UNKNOWN;
	ep->class = mp->class;
	ep->resp_active = 0;
	init_waitqueue_head(&ep->resp_wq);
	INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
out:
	return ep;
err:
	/* Pool exhausted: no free XID slot found. */
	spin_unlock_bh(&pool->lock);
	atomic_inc(&mp->stats.no_free_exch_xid);
	mempool_free(ep, mp->ep_pool);
	return NULL;
}
895*4882a593Smuzhiyun
896*4882a593Smuzhiyun /**
897*4882a593Smuzhiyun * fc_exch_alloc() - Allocate an exchange from an EM on a
898*4882a593Smuzhiyun * local port's list of EMs.
899*4882a593Smuzhiyun * @lport: The local port that will own the exchange
900*4882a593Smuzhiyun * @fp: The FC frame that the exchange will be for
901*4882a593Smuzhiyun *
902*4882a593Smuzhiyun * This function walks the list of exchange manager(EM)
903*4882a593Smuzhiyun * anchors to select an EM for a new exchange allocation. The
904*4882a593Smuzhiyun * EM is selected when a NULL match function pointer is encountered
905*4882a593Smuzhiyun * or when a call to a match function returns true.
906*4882a593Smuzhiyun */
fc_exch_alloc(struct fc_lport * lport,struct fc_frame * fp)907*4882a593Smuzhiyun static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
908*4882a593Smuzhiyun struct fc_frame *fp)
909*4882a593Smuzhiyun {
910*4882a593Smuzhiyun struct fc_exch_mgr_anchor *ema;
911*4882a593Smuzhiyun struct fc_exch *ep;
912*4882a593Smuzhiyun
913*4882a593Smuzhiyun list_for_each_entry(ema, &lport->ema_list, ema_list) {
914*4882a593Smuzhiyun if (!ema->match || ema->match(fp)) {
915*4882a593Smuzhiyun ep = fc_exch_em_alloc(lport, ema->mp);
916*4882a593Smuzhiyun if (ep)
917*4882a593Smuzhiyun return ep;
918*4882a593Smuzhiyun }
919*4882a593Smuzhiyun }
920*4882a593Smuzhiyun return NULL;
921*4882a593Smuzhiyun }
922*4882a593Smuzhiyun
923*4882a593Smuzhiyun /**
924*4882a593Smuzhiyun * fc_exch_find() - Lookup and hold an exchange
925*4882a593Smuzhiyun * @mp: The exchange manager to lookup the exchange from
926*4882a593Smuzhiyun * @xid: The XID of the exchange to look up
927*4882a593Smuzhiyun */
fc_exch_find(struct fc_exch_mgr * mp,u16 xid)928*4882a593Smuzhiyun static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
929*4882a593Smuzhiyun {
930*4882a593Smuzhiyun struct fc_lport *lport = mp->lport;
931*4882a593Smuzhiyun struct fc_exch_pool *pool;
932*4882a593Smuzhiyun struct fc_exch *ep = NULL;
933*4882a593Smuzhiyun u16 cpu = xid & fc_cpu_mask;
934*4882a593Smuzhiyun
935*4882a593Smuzhiyun if (xid == FC_XID_UNKNOWN)
936*4882a593Smuzhiyun return NULL;
937*4882a593Smuzhiyun
938*4882a593Smuzhiyun if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
939*4882a593Smuzhiyun pr_err("host%u: lport %6.6x: xid %d invalid CPU %d\n:",
940*4882a593Smuzhiyun lport->host->host_no, lport->port_id, xid, cpu);
941*4882a593Smuzhiyun return NULL;
942*4882a593Smuzhiyun }
943*4882a593Smuzhiyun
944*4882a593Smuzhiyun if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
945*4882a593Smuzhiyun pool = per_cpu_ptr(mp->pool, cpu);
946*4882a593Smuzhiyun spin_lock_bh(&pool->lock);
947*4882a593Smuzhiyun ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
948*4882a593Smuzhiyun if (ep == &fc_quarantine_exch) {
949*4882a593Smuzhiyun FC_LPORT_DBG(lport, "xid %x quarantined\n", xid);
950*4882a593Smuzhiyun ep = NULL;
951*4882a593Smuzhiyun }
952*4882a593Smuzhiyun if (ep) {
953*4882a593Smuzhiyun WARN_ON(ep->xid != xid);
954*4882a593Smuzhiyun fc_exch_hold(ep);
955*4882a593Smuzhiyun }
956*4882a593Smuzhiyun spin_unlock_bh(&pool->lock);
957*4882a593Smuzhiyun }
958*4882a593Smuzhiyun return ep;
959*4882a593Smuzhiyun }
960*4882a593Smuzhiyun
961*4882a593Smuzhiyun
/**
 * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and
 *		    the memory allocated for the related objects may be freed.
 * @sp: The sequence that has completed
 *
 * Note: May sleep if invoked from outside a response handler.
 */
void fc_exch_done(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);
	int rc;

	spin_lock_bh(&ep->ex_lock);
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);

	/* Clear the handler (may wait for in-flight ->resp() calls) so it
	 * is never invoked after this function returns. */
	fc_seq_set_resp(sp, NULL, ep->arg);
	if (!rc)
		fc_exch_delete(ep);
}
EXPORT_SYMBOL(fc_exch_done);
983*4882a593Smuzhiyun
/**
 * fc_exch_resp() - Allocate a new exchange for a response frame
 * @lport: The local port that the exchange was for
 * @mp:	   The exchange manager to allocate the exchange from
 * @fp:	   The response frame
 *
 * Sets the responder ID in the frame header.
 *
 * Returns the exchange with a hold for the caller, or NULL if
 * allocation failed.
 */
static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
				    struct fc_exch_mgr *mp,
				    struct fc_frame *fp)
{
	struct fc_exch *ep;
	struct fc_frame_header *fh;

	ep = fc_exch_alloc(lport, fp);
	if (ep) {
		ep->class = fc_frame_class(fp);

		/*
		 * Set EX_CTX indicating we're responding on this exchange.
		 */
		ep->f_ctl |= FC_FC_EX_CTX;	/* we're responding */
		ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not new */
		fh = fc_frame_header_get(fp);
		/* Our S_ID/D_ID are the received frame's D_ID/S_ID swapped. */
		ep->sid = ntoh24(fh->fh_d_id);
		ep->did = ntoh24(fh->fh_s_id);
		ep->oid = ep->did;

		/*
		 * Allocated exchange has placed the XID in the
		 * originator field. Move it to the responder field,
		 * and set the originator XID from the frame.
		 */
		ep->rxid = ep->xid;
		ep->oxid = ntohs(fh->fh_ox_id);
		ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
		if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
			ep->esb_stat &= ~ESB_ST_SEQ_INIT;

		fc_exch_hold(ep);	/* hold for caller */
		spin_unlock_bh(&ep->ex_lock);	/* lock from fc_exch_alloc */
	}
	return ep;
}
1029*4882a593Smuzhiyun
/**
 * fc_seq_lookup_recip() - Find a sequence where the other end
 *			   originated the sequence
 * @lport: The local port that the frame was sent to
 * @mp:	   The Exchange Manager to lookup the exchange from
 * @fp:	   The frame associated with the sequence we're looking for
 *
 * On success, fr_seq(fp) is set to the found/created sequence.
 *
 * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
 * on the ep that should be released by the caller.
 */
static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
						 struct fc_exch_mgr *mp,
						 struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch *ep = NULL;
	struct fc_seq *sp = NULL;
	enum fc_pf_rjt_reason reject = FC_RJT_NONE;
	u32 f_ctl;
	u16 xid;

	f_ctl = ntoh24(fh->fh_f_ctl);
	/* This path is only for frames where the other end owns the
	 * sequence context. */
	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);

	/*
	 * Lookup or create the exchange if we will be creating the sequence.
	 */
	if (f_ctl & FC_FC_EX_CTX) {
		xid = ntohs(fh->fh_ox_id);	/* we originated exch */
		ep = fc_exch_find(mp, xid);
		if (!ep) {
			atomic_inc(&mp->stats.xid_not_found);
			reject = FC_RJT_OX_ID;
			goto out;
		}
		if (ep->rxid == FC_XID_UNKNOWN)
			ep->rxid = ntohs(fh->fh_rx_id);
		else if (ep->rxid != ntohs(fh->fh_rx_id)) {
			/* Responder XID does not match what we recorded. */
			reject = FC_RJT_OX_ID;
			goto rel;
		}
	} else {
		xid = ntohs(fh->fh_rx_id);	/* we are the responder */

		/*
		 * Special case for MDS issuing an ELS TEST with a
		 * bad rxid of 0.
		 * XXX take this out once we do the proper reject.
		 */
		if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
		    fc_frame_payload_op(fp) == ELS_TEST) {
			fh->fh_rx_id = htons(FC_XID_UNKNOWN);
			xid = FC_XID_UNKNOWN;
		}

		/*
		 * new sequence - find the exchange
		 */
		ep = fc_exch_find(mp, xid);
		if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
			if (ep) {
				/* New exchange requested but XID in use. */
				atomic_inc(&mp->stats.xid_busy);
				reject = FC_RJT_RX_ID;
				goto rel;
			}
			ep = fc_exch_resp(lport, mp, fp);
			if (!ep) {
				reject = FC_RJT_EXCH_EST;	/* XXX */
				goto out;
			}
			xid = ep->xid;	/* get our XID */
		} else if (!ep) {
			atomic_inc(&mp->stats.xid_not_found);
			reject = FC_RJT_RX_ID;	/* XID not found */
			goto out;
		}
	}

	spin_lock_bh(&ep->ex_lock);
	/*
	 * At this point, we have the exchange held.
	 * Find or create the sequence.
	 */
	if (fc_sof_is_init(fr_sof(fp))) {
		/* Start-of-sequence frame: (re)initialize the sequence. */
		sp = &ep->seq;
		sp->ssb_stat |= SSB_ST_RESP;
		sp->id = fh->fh_seq_id;
	} else {
		sp = &ep->seq;
		if (sp->id != fh->fh_seq_id) {
			atomic_inc(&mp->stats.seq_not_found);
			if (f_ctl & FC_FC_END_SEQ) {
				/*
				 * Update sequence_id based on incoming last
				 * frame of sequence exchange. This is needed
				 * for FC target where DDP has been used
				 * on target where, stack is indicated only
				 * about last frame's (payload _header) header.
				 * Whereas "seq_id" which is part of
				 * frame_header is allocated by initiator
				 * which is totally different from "seq_id"
				 * allocated when XFER_RDY was sent by target.
				 * To avoid false -ve which results into not
				 * sending RSP, hence write request on other
				 * end never finishes.
				 */
				sp->ssb_stat |= SSB_ST_RESP;
				sp->id = fh->fh_seq_id;
			} else {
				spin_unlock_bh(&ep->ex_lock);

				/* sequence/exch should exist */
				reject = FC_RJT_SEQ_ID;
				goto rel;
			}
		}
	}
	WARN_ON(ep != fc_seq_exch(sp));

	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat |= ESB_ST_SEQ_INIT;
	spin_unlock_bh(&ep->ex_lock);

	fr_seq(fp) = sp;
out:
	return reject;
rel:
	fc_exch_done(&ep->seq);
	fc_exch_release(ep);	/* hold from fc_exch_find/fc_exch_resp */
	return reject;
}
1161*4882a593Smuzhiyun
1162*4882a593Smuzhiyun /**
1163*4882a593Smuzhiyun * fc_seq_lookup_orig() - Find a sequence where this end
1164*4882a593Smuzhiyun * originated the sequence
1165*4882a593Smuzhiyun * @mp: The Exchange Manager to lookup the exchange from
1166*4882a593Smuzhiyun * @fp: The frame associated with the sequence we're looking for
1167*4882a593Smuzhiyun *
1168*4882a593Smuzhiyun * Does not hold the sequence for the caller.
1169*4882a593Smuzhiyun */
fc_seq_lookup_orig(struct fc_exch_mgr * mp,struct fc_frame * fp)1170*4882a593Smuzhiyun static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
1171*4882a593Smuzhiyun struct fc_frame *fp)
1172*4882a593Smuzhiyun {
1173*4882a593Smuzhiyun struct fc_frame_header *fh = fc_frame_header_get(fp);
1174*4882a593Smuzhiyun struct fc_exch *ep;
1175*4882a593Smuzhiyun struct fc_seq *sp = NULL;
1176*4882a593Smuzhiyun u32 f_ctl;
1177*4882a593Smuzhiyun u16 xid;
1178*4882a593Smuzhiyun
1179*4882a593Smuzhiyun f_ctl = ntoh24(fh->fh_f_ctl);
1180*4882a593Smuzhiyun WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
1181*4882a593Smuzhiyun xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
1182*4882a593Smuzhiyun ep = fc_exch_find(mp, xid);
1183*4882a593Smuzhiyun if (!ep)
1184*4882a593Smuzhiyun return NULL;
1185*4882a593Smuzhiyun if (ep->seq.id == fh->fh_seq_id) {
1186*4882a593Smuzhiyun /*
1187*4882a593Smuzhiyun * Save the RX_ID if we didn't previously know it.
1188*4882a593Smuzhiyun */
1189*4882a593Smuzhiyun sp = &ep->seq;
1190*4882a593Smuzhiyun if ((f_ctl & FC_FC_EX_CTX) != 0 &&
1191*4882a593Smuzhiyun ep->rxid == FC_XID_UNKNOWN) {
1192*4882a593Smuzhiyun ep->rxid = ntohs(fh->fh_rx_id);
1193*4882a593Smuzhiyun }
1194*4882a593Smuzhiyun }
1195*4882a593Smuzhiyun fc_exch_release(ep);
1196*4882a593Smuzhiyun return sp;
1197*4882a593Smuzhiyun }
1198*4882a593Smuzhiyun
1199*4882a593Smuzhiyun /**
1200*4882a593Smuzhiyun * fc_exch_set_addr() - Set the source and destination IDs for an exchange
1201*4882a593Smuzhiyun * @ep: The exchange to set the addresses for
1202*4882a593Smuzhiyun * @orig_id: The originator's ID
1203*4882a593Smuzhiyun * @resp_id: The responder's ID
1204*4882a593Smuzhiyun *
1205*4882a593Smuzhiyun * Note this must be done before the first sequence of the exchange is sent.
1206*4882a593Smuzhiyun */
fc_exch_set_addr(struct fc_exch * ep,u32 orig_id,u32 resp_id)1207*4882a593Smuzhiyun static void fc_exch_set_addr(struct fc_exch *ep,
1208*4882a593Smuzhiyun u32 orig_id, u32 resp_id)
1209*4882a593Smuzhiyun {
1210*4882a593Smuzhiyun ep->oid = orig_id;
1211*4882a593Smuzhiyun if (ep->esb_stat & ESB_ST_RESP) {
1212*4882a593Smuzhiyun ep->sid = resp_id;
1213*4882a593Smuzhiyun ep->did = orig_id;
1214*4882a593Smuzhiyun } else {
1215*4882a593Smuzhiyun ep->sid = orig_id;
1216*4882a593Smuzhiyun ep->did = resp_id;
1217*4882a593Smuzhiyun }
1218*4882a593Smuzhiyun }
1219*4882a593Smuzhiyun
1220*4882a593Smuzhiyun /**
1221*4882a593Smuzhiyun * fc_seq_els_rsp_send() - Send an ELS response using information from
1222*4882a593Smuzhiyun * the existing sequence/exchange.
1223*4882a593Smuzhiyun * @fp: The received frame
1224*4882a593Smuzhiyun * @els_cmd: The ELS command to be sent
1225*4882a593Smuzhiyun * @els_data: The ELS data to be sent
1226*4882a593Smuzhiyun *
1227*4882a593Smuzhiyun * The received frame is not freed.
1228*4882a593Smuzhiyun */
fc_seq_els_rsp_send(struct fc_frame * fp,enum fc_els_cmd els_cmd,struct fc_seq_els_data * els_data)1229*4882a593Smuzhiyun void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
1230*4882a593Smuzhiyun struct fc_seq_els_data *els_data)
1231*4882a593Smuzhiyun {
1232*4882a593Smuzhiyun switch (els_cmd) {
1233*4882a593Smuzhiyun case ELS_LS_RJT:
1234*4882a593Smuzhiyun fc_seq_ls_rjt(fp, els_data->reason, els_data->explan);
1235*4882a593Smuzhiyun break;
1236*4882a593Smuzhiyun case ELS_LS_ACC:
1237*4882a593Smuzhiyun fc_seq_ls_acc(fp);
1238*4882a593Smuzhiyun break;
1239*4882a593Smuzhiyun case ELS_RRQ:
1240*4882a593Smuzhiyun fc_exch_els_rrq(fp);
1241*4882a593Smuzhiyun break;
1242*4882a593Smuzhiyun case ELS_REC:
1243*4882a593Smuzhiyun fc_exch_els_rec(fp);
1244*4882a593Smuzhiyun break;
1245*4882a593Smuzhiyun default:
1246*4882a593Smuzhiyun FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
1247*4882a593Smuzhiyun }
1248*4882a593Smuzhiyun }
1249*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(fc_seq_els_rsp_send);
1250*4882a593Smuzhiyun
1251*4882a593Smuzhiyun /**
1252*4882a593Smuzhiyun * fc_seq_send_last() - Send a sequence that is the last in the exchange
1253*4882a593Smuzhiyun * @sp: The sequence that is to be sent
1254*4882a593Smuzhiyun * @fp: The frame that will be sent on the sequence
1255*4882a593Smuzhiyun * @rctl: The R_CTL information to be sent
1256*4882a593Smuzhiyun * @fh_type: The frame header type
1257*4882a593Smuzhiyun */
fc_seq_send_last(struct fc_seq * sp,struct fc_frame * fp,enum fc_rctl rctl,enum fc_fh_type fh_type)1258*4882a593Smuzhiyun static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
1259*4882a593Smuzhiyun enum fc_rctl rctl, enum fc_fh_type fh_type)
1260*4882a593Smuzhiyun {
1261*4882a593Smuzhiyun u32 f_ctl;
1262*4882a593Smuzhiyun struct fc_exch *ep = fc_seq_exch(sp);
1263*4882a593Smuzhiyun
1264*4882a593Smuzhiyun f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1265*4882a593Smuzhiyun f_ctl |= ep->f_ctl;
1266*4882a593Smuzhiyun fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
1267*4882a593Smuzhiyun fc_seq_send_locked(ep->lp, sp, fp);
1268*4882a593Smuzhiyun }
1269*4882a593Smuzhiyun
/**
 * fc_seq_send_ack() - Send an acknowledgement that we've received a frame
 * @sp:	   The sequence to send the ACK on
 * @rx_fp: The received frame that is being acknoledged
 *
 * Send ACK_1 (or equiv.) indicating we received something.
 * If frame allocation fails the ACK is silently dropped (logged only).
 */
static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_frame_header *rx_fh;
	struct fc_frame_header *fh;
	struct fc_exch *ep = fc_seq_exch(sp);
	struct fc_lport *lport = ep->lp;
	unsigned int f_ctl;

	/*
	 * Don't send ACKs for class 3.
	 */
	if (fc_sof_needs_ack(fr_sof(rx_fp))) {
		fp = fc_frame_alloc(lport, 0);
		if (!fp) {
			FC_EXCH_DBG(ep, "Drop ACK request, out of memory\n");
			return;
		}

		fh = fc_frame_header_get(fp);
		fh->fh_r_ctl = FC_RCTL_ACK_1;
		fh->fh_type = FC_TYPE_BLS;

		/*
		 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
		 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
		 * Bits 9-8 are meaningful (retransmitted or unidirectional).
		 * Last ACK uses bits 7-6 (continue sequence),
		 * bits 5-4 are meaningful (what kind of ACK to use).
		 */
		rx_fh = fc_frame_header_get(rx_fp);
		f_ctl = ntoh24(rx_fh->fh_f_ctl);
		/* Keep only the bits echoed back to the sender... */
		f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
			FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
			FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
			FC_FC_RETX_SEQ | FC_FC_UNI_TX;
		/* ...and flip the context bits, since roles are reversed. */
		f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
		hton24(fh->fh_f_ctl, f_ctl);

		fc_exch_setup_hdr(ep, fp, f_ctl);
		/* The ACK echoes the received frame's sequence identity. */
		fh->fh_seq_id = rx_fh->fh_seq_id;
		fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
		fh->fh_parm_offset = htonl(1);	/* ack single frame */

		fr_sof(fp) = fr_sof(rx_fp);
		/* EOF_T terminates the sequence when the peer ended theirs. */
		if (f_ctl & FC_FC_END_SEQ)
			fr_eof(fp) = FC_EOF_T;
		else
			fr_eof(fp) = FC_EOF_N;

		lport->tt.frame_send(lport, fp);
	}
}
1330*4882a593Smuzhiyun
1331*4882a593Smuzhiyun /**
1332*4882a593Smuzhiyun * fc_exch_send_ba_rjt() - Send BLS Reject
1333*4882a593Smuzhiyun * @rx_fp: The frame being rejected
1334*4882a593Smuzhiyun * @reason: The reason the frame is being rejected
1335*4882a593Smuzhiyun * @explan: The explanation for the rejection
1336*4882a593Smuzhiyun *
1337*4882a593Smuzhiyun * This is for rejecting BA_ABTS only.
1338*4882a593Smuzhiyun */
static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
				enum fc_ba_rjt_reason reason,
				enum fc_ba_rjt_explan explan)
{
	struct fc_frame *fp;
	struct fc_frame_header *rx_fh;
	struct fc_frame_header *fh;
	struct fc_ba_rjt *rp;
	struct fc_seq *sp;
	struct fc_lport *lport;
	unsigned int f_ctl;

	lport = fr_dev(rx_fp);
	sp = fr_seq(rx_fp);
	fp = fc_frame_alloc(lport, sizeof(*rp));
	if (!fp) {
		FC_EXCH_DBG(fc_seq_exch(sp),
			    "Drop BA_RJT request, out of memory\n");
		return;
	}
	fh = fc_frame_header_get(fp);
	rx_fh = fc_frame_header_get(rx_fp);

	/* Clear the FC header and the BA_RJT payload in a single pass;
	 * the payload immediately follows the header in the frame.
	 */
	memset(fh, 0, sizeof(*fh) + sizeof(*rp));

	rp = fc_frame_payload_get(fp, sizeof(*rp));
	rp->br_reason = reason;
	rp->br_explan = explan;

	/*
	 * seq_id, cs_ctl, df_ctl and param/offset are zero.
	 */
	/* Swap source/destination so the reject goes back to the sender,
	 * echoing the exchange IDs and sequence count of the rejected frame.
	 */
	memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
	memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
	fh->fh_ox_id = rx_fh->fh_ox_id;
	fh->fh_rx_id = rx_fh->fh_rx_id;
	fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
	fh->fh_r_ctl = FC_RCTL_BA_RJT;
	fh->fh_type = FC_TYPE_BLS;

	/*
	 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
	 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
	 * Bits 9-8 are meaningful (retransmitted or unidirectional).
	 * Last ACK uses bits 7-6 (continue sequence),
	 * bits 5-4 are meaningful (what kind of ACK to use).
	 * Always set LAST_SEQ, END_SEQ.
	 */
	f_ctl = ntoh24(rx_fh->fh_f_ctl);
	f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
		FC_FC_END_CONN | FC_FC_SEQ_INIT |
		FC_FC_RETX_SEQ | FC_FC_UNI_TX;
	f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
	f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
	f_ctl &= ~FC_FC_FIRST_SEQ;
	hton24(fh->fh_f_ctl, f_ctl);

	/* End the sequence with EOF_T unless this class still expects an
	 * ACK for it, in which case use EOF_N.
	 */
	fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
	fr_eof(fp) = FC_EOF_T;
	if (fc_sof_needs_ack(fr_sof(fp)))
		fr_eof(fp) = FC_EOF_N;

	lport->tt.frame_send(lport, fp);
}
1403*4882a593Smuzhiyun
1404*4882a593Smuzhiyun /**
1405*4882a593Smuzhiyun * fc_exch_recv_abts() - Handle an incoming ABTS
1406*4882a593Smuzhiyun * @ep: The exchange the abort was on
1407*4882a593Smuzhiyun * @rx_fp: The ABTS frame
1408*4882a593Smuzhiyun *
1409*4882a593Smuzhiyun * This would be for target mode usually, but could be due to lost
1410*4882a593Smuzhiyun * FCP transfer ready, confirm or RRQ. We always handle this as an
1411*4882a593Smuzhiyun * exchange abort, ignoring the parameter.
1412*4882a593Smuzhiyun */
static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_ba_acc *ap;
	struct fc_frame_header *fh;
	struct fc_seq *sp;

	/* No such exchange: answer with BA_RJT (invalid XID). */
	if (!ep)
		goto reject;

	FC_EXCH_DBG(ep, "exch: ABTS received\n");
	fp = fc_frame_alloc(ep->lp, sizeof(*ap));
	if (!fp) {
		FC_EXCH_DBG(ep, "Drop ABTS request, out of memory\n");
		goto free;
	}

	spin_lock_bh(&ep->ex_lock);
	if (ep->esb_stat & ESB_ST_COMPLETE) {
		/* Exchange already done; reject the late ABTS. */
		spin_unlock_bh(&ep->ex_lock);
		FC_EXCH_DBG(ep, "exch: ABTS rejected, exchange complete\n");
		fc_frame_free(fp);
		goto reject;
	}
	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
		/* Establish a Recovery Qualifier; take an extra reference
		 * that is dropped when the qualifier is released.
		 */
		ep->esb_stat |= ESB_ST_REC_QUAL;
		fc_exch_hold(ep);		/* hold for REC_QUAL */
	}
	/* Keep the exchange alive for R_A_TOV while the qualifier exists. */
	fc_exch_timer_set_locked(ep, ep->r_a_tov);
	fh = fc_frame_header_get(fp);
	ap = fc_frame_payload_get(fp, sizeof(*ap));
	memset(ap, 0, sizeof(*ap));
	sp = &ep->seq;
	ap->ba_high_seq_cnt = htons(0xffff);
	if (sp->ssb_stat & SSB_ST_RESP) {
		/* We were the sequence responder: report the valid SEQ_ID
		 * and the SEQ_CNT range in the BA_ACC.
		 */
		ap->ba_seq_id = sp->id;
		ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
		ap->ba_high_seq_cnt = fh->fh_seq_cnt;
		ap->ba_low_seq_cnt = htons(sp->cnt);
	}
	sp = fc_seq_start_next_locked(sp);
	fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
	ep->esb_stat |= ESB_ST_ABNORMAL;
	spin_unlock_bh(&ep->ex_lock);

free:
	fc_frame_free(rx_fp);
	return;

reject:
	fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
	goto free;
}
1466*4882a593Smuzhiyun
1467*4882a593Smuzhiyun /**
1468*4882a593Smuzhiyun * fc_seq_assign() - Assign exchange and sequence for incoming request
1469*4882a593Smuzhiyun * @lport: The local port that received the request
1470*4882a593Smuzhiyun * @fp: The request frame
1471*4882a593Smuzhiyun *
1472*4882a593Smuzhiyun * On success, the sequence pointer will be returned and also in fr_seq(@fp).
1473*4882a593Smuzhiyun * A reference will be held on the exchange/sequence for the caller, which
1474*4882a593Smuzhiyun * must call fc_seq_release().
1475*4882a593Smuzhiyun */
fc_seq_assign(struct fc_lport * lport,struct fc_frame * fp)1476*4882a593Smuzhiyun struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
1477*4882a593Smuzhiyun {
1478*4882a593Smuzhiyun struct fc_exch_mgr_anchor *ema;
1479*4882a593Smuzhiyun
1480*4882a593Smuzhiyun WARN_ON(lport != fr_dev(fp));
1481*4882a593Smuzhiyun WARN_ON(fr_seq(fp));
1482*4882a593Smuzhiyun fr_seq(fp) = NULL;
1483*4882a593Smuzhiyun
1484*4882a593Smuzhiyun list_for_each_entry(ema, &lport->ema_list, ema_list)
1485*4882a593Smuzhiyun if ((!ema->match || ema->match(fp)) &&
1486*4882a593Smuzhiyun fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE)
1487*4882a593Smuzhiyun break;
1488*4882a593Smuzhiyun return fr_seq(fp);
1489*4882a593Smuzhiyun }
1490*4882a593Smuzhiyun EXPORT_SYMBOL(fc_seq_assign);
1491*4882a593Smuzhiyun
1492*4882a593Smuzhiyun /**
1493*4882a593Smuzhiyun * fc_seq_release() - Release the hold
1494*4882a593Smuzhiyun * @sp: The sequence.
1495*4882a593Smuzhiyun */
void fc_seq_release(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	/* Drop the reference taken on the owning exchange. */
	fc_exch_release(ep);
}
1500*4882a593Smuzhiyun EXPORT_SYMBOL(fc_seq_release);
1501*4882a593Smuzhiyun
1502*4882a593Smuzhiyun /**
1503*4882a593Smuzhiyun * fc_exch_recv_req() - Handler for an incoming request
1504*4882a593Smuzhiyun * @lport: The local port that received the request
1505*4882a593Smuzhiyun * @mp: The EM that the exchange is on
1506*4882a593Smuzhiyun * @fp: The request frame
1507*4882a593Smuzhiyun *
1508*4882a593Smuzhiyun * This is used when the other end is originating the exchange
1509*4882a593Smuzhiyun * and the sequence.
1510*4882a593Smuzhiyun */
static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
			     struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_seq *sp = NULL;
	struct fc_exch *ep = NULL;
	enum fc_pf_rjt_reason reject;

	/* We can have the wrong fc_lport at this point with NPIV, which is a
	 * problem now that we know a new exchange needs to be allocated
	 */
	lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
	if (!lport) {
		/* No local port owns this D_ID; drop the frame. */
		fc_frame_free(fp);
		return;
	}
	fr_dev(fp) = lport;

	BUG_ON(fr_seq(fp));	/* XXX remove later */

	/*
	 * If the RX_ID is 0xffff, don't allocate an exchange.
	 * The upper-level protocol may request one later, if needed.
	 */
	if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
		return fc_lport_recv(lport, fp);

	reject = fc_seq_lookup_recip(lport, mp, fp);
	if (reject == FC_RJT_NONE) {
		sp = fr_seq(fp);	/* sequence will be held */
		ep = fc_seq_exch(sp);
		fc_seq_send_ack(sp, fp);
		/* Remember the frame's encapsulation for our replies. */
		ep->encaps = fr_encaps(fp);

		/*
		 * Call the receive function.
		 *
		 * The receive function may allocate a new sequence
		 * over the old one, so we shouldn't change the
		 * sequence after this.
		 *
		 * The frame will be freed by the receive function.
		 * If new exch resp handler is valid then call that
		 * first.
		 */
		if (!fc_invoke_resp(ep, sp, fp))
			fc_lport_recv(lport, fp);
		fc_exch_release(ep);	/* release from lookup */
	} else {
		FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
			     reject);
		fc_frame_free(fp);
	}
}
1565*4882a593Smuzhiyun
1566*4882a593Smuzhiyun /**
1567*4882a593Smuzhiyun * fc_exch_recv_seq_resp() - Handler for an incoming response where the other
1568*4882a593Smuzhiyun * end is the originator of the sequence that is a
1569*4882a593Smuzhiyun * response to our initial exchange
1570*4882a593Smuzhiyun * @mp: The EM that the exchange is on
1571*4882a593Smuzhiyun * @fp: The response frame
1572*4882a593Smuzhiyun */
fc_exch_recv_seq_resp(struct fc_exch_mgr * mp,struct fc_frame * fp)1573*4882a593Smuzhiyun static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1574*4882a593Smuzhiyun {
1575*4882a593Smuzhiyun struct fc_frame_header *fh = fc_frame_header_get(fp);
1576*4882a593Smuzhiyun struct fc_seq *sp;
1577*4882a593Smuzhiyun struct fc_exch *ep;
1578*4882a593Smuzhiyun enum fc_sof sof;
1579*4882a593Smuzhiyun u32 f_ctl;
1580*4882a593Smuzhiyun int rc;
1581*4882a593Smuzhiyun
1582*4882a593Smuzhiyun ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
1583*4882a593Smuzhiyun if (!ep) {
1584*4882a593Smuzhiyun atomic_inc(&mp->stats.xid_not_found);
1585*4882a593Smuzhiyun goto out;
1586*4882a593Smuzhiyun }
1587*4882a593Smuzhiyun if (ep->esb_stat & ESB_ST_COMPLETE) {
1588*4882a593Smuzhiyun atomic_inc(&mp->stats.xid_not_found);
1589*4882a593Smuzhiyun goto rel;
1590*4882a593Smuzhiyun }
1591*4882a593Smuzhiyun if (ep->rxid == FC_XID_UNKNOWN)
1592*4882a593Smuzhiyun ep->rxid = ntohs(fh->fh_rx_id);
1593*4882a593Smuzhiyun if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
1594*4882a593Smuzhiyun atomic_inc(&mp->stats.xid_not_found);
1595*4882a593Smuzhiyun goto rel;
1596*4882a593Smuzhiyun }
1597*4882a593Smuzhiyun if (ep->did != ntoh24(fh->fh_s_id) &&
1598*4882a593Smuzhiyun ep->did != FC_FID_FLOGI) {
1599*4882a593Smuzhiyun atomic_inc(&mp->stats.xid_not_found);
1600*4882a593Smuzhiyun goto rel;
1601*4882a593Smuzhiyun }
1602*4882a593Smuzhiyun sof = fr_sof(fp);
1603*4882a593Smuzhiyun sp = &ep->seq;
1604*4882a593Smuzhiyun if (fc_sof_is_init(sof)) {
1605*4882a593Smuzhiyun sp->ssb_stat |= SSB_ST_RESP;
1606*4882a593Smuzhiyun sp->id = fh->fh_seq_id;
1607*4882a593Smuzhiyun }
1608*4882a593Smuzhiyun
1609*4882a593Smuzhiyun f_ctl = ntoh24(fh->fh_f_ctl);
1610*4882a593Smuzhiyun fr_seq(fp) = sp;
1611*4882a593Smuzhiyun
1612*4882a593Smuzhiyun spin_lock_bh(&ep->ex_lock);
1613*4882a593Smuzhiyun if (f_ctl & FC_FC_SEQ_INIT)
1614*4882a593Smuzhiyun ep->esb_stat |= ESB_ST_SEQ_INIT;
1615*4882a593Smuzhiyun spin_unlock_bh(&ep->ex_lock);
1616*4882a593Smuzhiyun
1617*4882a593Smuzhiyun if (fc_sof_needs_ack(sof))
1618*4882a593Smuzhiyun fc_seq_send_ack(sp, fp);
1619*4882a593Smuzhiyun
1620*4882a593Smuzhiyun if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
1621*4882a593Smuzhiyun (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
1622*4882a593Smuzhiyun (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
1623*4882a593Smuzhiyun spin_lock_bh(&ep->ex_lock);
1624*4882a593Smuzhiyun rc = fc_exch_done_locked(ep);
1625*4882a593Smuzhiyun WARN_ON(fc_seq_exch(sp) != ep);
1626*4882a593Smuzhiyun spin_unlock_bh(&ep->ex_lock);
1627*4882a593Smuzhiyun if (!rc) {
1628*4882a593Smuzhiyun fc_exch_delete(ep);
1629*4882a593Smuzhiyun } else {
1630*4882a593Smuzhiyun FC_EXCH_DBG(ep, "ep is completed already,"
1631*4882a593Smuzhiyun "hence skip calling the resp\n");
1632*4882a593Smuzhiyun goto skip_resp;
1633*4882a593Smuzhiyun }
1634*4882a593Smuzhiyun }
1635*4882a593Smuzhiyun
1636*4882a593Smuzhiyun /*
1637*4882a593Smuzhiyun * Call the receive function.
1638*4882a593Smuzhiyun * The sequence is held (has a refcnt) for us,
1639*4882a593Smuzhiyun * but not for the receive function.
1640*4882a593Smuzhiyun *
1641*4882a593Smuzhiyun * The receive function may allocate a new sequence
1642*4882a593Smuzhiyun * over the old one, so we shouldn't change the
1643*4882a593Smuzhiyun * sequence after this.
1644*4882a593Smuzhiyun *
1645*4882a593Smuzhiyun * The frame will be freed by the receive function.
1646*4882a593Smuzhiyun * If new exch resp handler is valid then call that
1647*4882a593Smuzhiyun * first.
1648*4882a593Smuzhiyun */
1649*4882a593Smuzhiyun if (!fc_invoke_resp(ep, sp, fp))
1650*4882a593Smuzhiyun fc_frame_free(fp);
1651*4882a593Smuzhiyun
1652*4882a593Smuzhiyun skip_resp:
1653*4882a593Smuzhiyun fc_exch_release(ep);
1654*4882a593Smuzhiyun return;
1655*4882a593Smuzhiyun rel:
1656*4882a593Smuzhiyun fc_exch_release(ep);
1657*4882a593Smuzhiyun out:
1658*4882a593Smuzhiyun fc_frame_free(fp);
1659*4882a593Smuzhiyun }
1660*4882a593Smuzhiyun
1661*4882a593Smuzhiyun /**
1662*4882a593Smuzhiyun * fc_exch_recv_resp() - Handler for a sequence where other end is
1663*4882a593Smuzhiyun * responding to our sequence
1664*4882a593Smuzhiyun * @mp: The EM that the exchange is on
1665*4882a593Smuzhiyun * @fp: The response frame
1666*4882a593Smuzhiyun */
fc_exch_recv_resp(struct fc_exch_mgr * mp,struct fc_frame * fp)1667*4882a593Smuzhiyun static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1668*4882a593Smuzhiyun {
1669*4882a593Smuzhiyun struct fc_seq *sp;
1670*4882a593Smuzhiyun
1671*4882a593Smuzhiyun sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
1672*4882a593Smuzhiyun
1673*4882a593Smuzhiyun if (!sp)
1674*4882a593Smuzhiyun atomic_inc(&mp->stats.xid_not_found);
1675*4882a593Smuzhiyun else
1676*4882a593Smuzhiyun atomic_inc(&mp->stats.non_bls_resp);
1677*4882a593Smuzhiyun
1678*4882a593Smuzhiyun fc_frame_free(fp);
1679*4882a593Smuzhiyun }
1680*4882a593Smuzhiyun
1681*4882a593Smuzhiyun /**
1682*4882a593Smuzhiyun * fc_exch_abts_resp() - Handler for a response to an ABT
1683*4882a593Smuzhiyun * @ep: The exchange that the frame is on
1684*4882a593Smuzhiyun * @fp: The response frame
1685*4882a593Smuzhiyun *
1686*4882a593Smuzhiyun * This response would be to an ABTS cancelling an exchange or sequence.
1687*4882a593Smuzhiyun * The response can be either BA_ACC or BA_RJT
1688*4882a593Smuzhiyun */
static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fc_ba_acc *ap;
	struct fc_seq *sp;
	u16 low;
	u16 high;
	int rc = 1, has_rec = 0;

	fh = fc_frame_header_get(fp);
	FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
		    fc_exch_rctl_name(fh->fh_r_ctl));

	/* If we won the race against the exchange timer, cancel it and
	 * drop the reference the pending timer held; nothing else to do.
	 */
	if (cancel_delayed_work_sync(&ep->timeout_work)) {
		FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n");
		fc_exch_release(ep);	/* release from pending timer hold */
		return;
	}

	spin_lock_bh(&ep->ex_lock);
	switch (fh->fh_r_ctl) {
	case FC_RCTL_BA_ACC:
		ap = fc_frame_payload_get(fp, sizeof(*ap));
		if (!ap)
			break;

		/*
		 * Decide whether to establish a Recovery Qualifier.
		 * We do this if there is a non-empty SEQ_CNT range and
		 * SEQ_ID is the same as the one we aborted.
		 */
		low = ntohs(ap->ba_low_seq_cnt);
		high = ntohs(ap->ba_high_seq_cnt);
		if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
		    (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
		     ap->ba_seq_id == ep->seq_id) && low != high) {
			ep->esb_stat |= ESB_ST_REC_QUAL;
			fc_exch_hold(ep);  /* hold for recovery qualifier */
			has_rec = 1;
		}
		break;
	case FC_RCTL_BA_RJT:
		break;
	default:
		break;
	}

	/* do we need to do some other checks here. Can we reuse more of
	 * fc_exch_recv_seq_resp
	 */
	sp = &ep->seq;
	/*
	 * do we want to check END_SEQ as well as LAST_SEQ here?
	 */
	if (ep->fh_type != FC_TYPE_FCP &&
	    ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
		rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);

	/* Temporary hold so the exchange survives delete + resp callback. */
	fc_exch_hold(ep);
	if (!rc)
		fc_exch_delete(ep);
	if (!fc_invoke_resp(ep, sp, fp))
		fc_frame_free(fp);
	if (has_rec)
		/* Keep the recovery qualifier for R_A_TOV. */
		fc_exch_timer_set(ep, ep->r_a_tov);
	fc_exch_release(ep);
}
1757*4882a593Smuzhiyun
1758*4882a593Smuzhiyun /**
1759*4882a593Smuzhiyun * fc_exch_recv_bls() - Handler for a BLS sequence
1760*4882a593Smuzhiyun * @mp: The EM that the exchange is on
1761*4882a593Smuzhiyun * @fp: The request frame
1762*4882a593Smuzhiyun *
1763*4882a593Smuzhiyun * The BLS frame is always a sequence initiated by the remote side.
1764*4882a593Smuzhiyun * We may be either the originator or recipient of the exchange.
1765*4882a593Smuzhiyun */
static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fc_exch *ep;
	u32 f_ctl;

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);
	fr_seq(fp) = NULL;

	/* EX_CTX set means the sender is the exchange responder, so our
	 * XID is the OX_ID; otherwise it is the RX_ID.
	 */
	ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
			  ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
	if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
		spin_lock_bh(&ep->ex_lock);
		ep->esb_stat |= ESB_ST_SEQ_INIT;
		spin_unlock_bh(&ep->ex_lock);
	}
	if (f_ctl & FC_FC_SEQ_CTX) {
		/*
		 * A response to a sequence we initiated.
		 * This should only be ACKs for class 2 or F.
		 */
		switch (fh->fh_r_ctl) {
		case FC_RCTL_ACK_1:
		case FC_RCTL_ACK_0:
			break;
		default:
			if (ep)
				FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n",
					    fh->fh_r_ctl,
					    fc_exch_rctl_name(fh->fh_r_ctl));
			break;
		}
		fc_frame_free(fp);
	} else {
		/* A BLS request/reply on a sequence the remote initiated. */
		switch (fh->fh_r_ctl) {
		case FC_RCTL_BA_RJT:
		case FC_RCTL_BA_ACC:
			if (ep)
				fc_exch_abts_resp(ep, fp);
			else
				fc_frame_free(fp);
			break;
		case FC_RCTL_BA_ABTS:
			/* fc_exch_recv_abts() handles ep == NULL itself,
			 * but the reject path needs no exchange hold.
			 */
			if (ep)
				fc_exch_recv_abts(ep, fp);
			else
				fc_frame_free(fp);
			break;
		default:			/* ignore junk */
			fc_frame_free(fp);
			break;
		}
	}
	if (ep)
		fc_exch_release(ep);	/* release hold taken by fc_exch_find */
}
1823*4882a593Smuzhiyun
1824*4882a593Smuzhiyun /**
1825*4882a593Smuzhiyun * fc_seq_ls_acc() - Accept sequence with LS_ACC
1826*4882a593Smuzhiyun * @rx_fp: The received frame, not freed here.
1827*4882a593Smuzhiyun *
1828*4882a593Smuzhiyun * If this fails due to allocation or transmit congestion, assume the
1829*4882a593Smuzhiyun * originator will repeat the sequence.
1830*4882a593Smuzhiyun */
fc_seq_ls_acc(struct fc_frame * rx_fp)1831*4882a593Smuzhiyun static void fc_seq_ls_acc(struct fc_frame *rx_fp)
1832*4882a593Smuzhiyun {
1833*4882a593Smuzhiyun struct fc_lport *lport;
1834*4882a593Smuzhiyun struct fc_els_ls_acc *acc;
1835*4882a593Smuzhiyun struct fc_frame *fp;
1836*4882a593Smuzhiyun struct fc_seq *sp;
1837*4882a593Smuzhiyun
1838*4882a593Smuzhiyun lport = fr_dev(rx_fp);
1839*4882a593Smuzhiyun sp = fr_seq(rx_fp);
1840*4882a593Smuzhiyun fp = fc_frame_alloc(lport, sizeof(*acc));
1841*4882a593Smuzhiyun if (!fp) {
1842*4882a593Smuzhiyun FC_EXCH_DBG(fc_seq_exch(sp),
1843*4882a593Smuzhiyun "exch: drop LS_ACC, out of memory\n");
1844*4882a593Smuzhiyun return;
1845*4882a593Smuzhiyun }
1846*4882a593Smuzhiyun acc = fc_frame_payload_get(fp, sizeof(*acc));
1847*4882a593Smuzhiyun memset(acc, 0, sizeof(*acc));
1848*4882a593Smuzhiyun acc->la_cmd = ELS_LS_ACC;
1849*4882a593Smuzhiyun fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1850*4882a593Smuzhiyun lport->tt.frame_send(lport, fp);
1851*4882a593Smuzhiyun }
1852*4882a593Smuzhiyun
1853*4882a593Smuzhiyun /**
1854*4882a593Smuzhiyun * fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT
1855*4882a593Smuzhiyun * @rx_fp: The received frame, not freed here.
1856*4882a593Smuzhiyun * @reason: The reason the sequence is being rejected
1857*4882a593Smuzhiyun * @explan: The explanation for the rejection
1858*4882a593Smuzhiyun *
1859*4882a593Smuzhiyun * If this fails due to allocation or transmit congestion, assume the
1860*4882a593Smuzhiyun * originator will repeat the sequence.
1861*4882a593Smuzhiyun */
fc_seq_ls_rjt(struct fc_frame * rx_fp,enum fc_els_rjt_reason reason,enum fc_els_rjt_explan explan)1862*4882a593Smuzhiyun static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
1863*4882a593Smuzhiyun enum fc_els_rjt_explan explan)
1864*4882a593Smuzhiyun {
1865*4882a593Smuzhiyun struct fc_lport *lport;
1866*4882a593Smuzhiyun struct fc_els_ls_rjt *rjt;
1867*4882a593Smuzhiyun struct fc_frame *fp;
1868*4882a593Smuzhiyun struct fc_seq *sp;
1869*4882a593Smuzhiyun
1870*4882a593Smuzhiyun lport = fr_dev(rx_fp);
1871*4882a593Smuzhiyun sp = fr_seq(rx_fp);
1872*4882a593Smuzhiyun fp = fc_frame_alloc(lport, sizeof(*rjt));
1873*4882a593Smuzhiyun if (!fp) {
1874*4882a593Smuzhiyun FC_EXCH_DBG(fc_seq_exch(sp),
1875*4882a593Smuzhiyun "exch: drop LS_ACC, out of memory\n");
1876*4882a593Smuzhiyun return;
1877*4882a593Smuzhiyun }
1878*4882a593Smuzhiyun rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1879*4882a593Smuzhiyun memset(rjt, 0, sizeof(*rjt));
1880*4882a593Smuzhiyun rjt->er_cmd = ELS_LS_RJT;
1881*4882a593Smuzhiyun rjt->er_reason = reason;
1882*4882a593Smuzhiyun rjt->er_explan = explan;
1883*4882a593Smuzhiyun fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1884*4882a593Smuzhiyun lport->tt.frame_send(lport, fp);
1885*4882a593Smuzhiyun }
1886*4882a593Smuzhiyun
1887*4882a593Smuzhiyun /**
1888*4882a593Smuzhiyun * fc_exch_reset() - Reset an exchange
1889*4882a593Smuzhiyun * @ep: The exchange to be reset
1890*4882a593Smuzhiyun *
1891*4882a593Smuzhiyun * Note: May sleep if invoked from outside a response handler.
1892*4882a593Smuzhiyun */
fc_exch_reset(struct fc_exch * ep)1893*4882a593Smuzhiyun static void fc_exch_reset(struct fc_exch *ep)
1894*4882a593Smuzhiyun {
1895*4882a593Smuzhiyun struct fc_seq *sp;
1896*4882a593Smuzhiyun int rc = 1;
1897*4882a593Smuzhiyun
1898*4882a593Smuzhiyun spin_lock_bh(&ep->ex_lock);
1899*4882a593Smuzhiyun ep->state |= FC_EX_RST_CLEANUP;
1900*4882a593Smuzhiyun fc_exch_timer_cancel(ep);
1901*4882a593Smuzhiyun if (ep->esb_stat & ESB_ST_REC_QUAL)
1902*4882a593Smuzhiyun atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */
1903*4882a593Smuzhiyun ep->esb_stat &= ~ESB_ST_REC_QUAL;
1904*4882a593Smuzhiyun sp = &ep->seq;
1905*4882a593Smuzhiyun rc = fc_exch_done_locked(ep);
1906*4882a593Smuzhiyun spin_unlock_bh(&ep->ex_lock);
1907*4882a593Smuzhiyun
1908*4882a593Smuzhiyun fc_exch_hold(ep);
1909*4882a593Smuzhiyun
1910*4882a593Smuzhiyun if (!rc) {
1911*4882a593Smuzhiyun fc_exch_delete(ep);
1912*4882a593Smuzhiyun } else {
1913*4882a593Smuzhiyun FC_EXCH_DBG(ep, "ep is completed already,"
1914*4882a593Smuzhiyun "hence skip calling the resp\n");
1915*4882a593Smuzhiyun goto skip_resp;
1916*4882a593Smuzhiyun }
1917*4882a593Smuzhiyun
1918*4882a593Smuzhiyun fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
1919*4882a593Smuzhiyun skip_resp:
1920*4882a593Smuzhiyun fc_seq_set_resp(sp, NULL, ep->arg);
1921*4882a593Smuzhiyun fc_exch_release(ep);
1922*4882a593Smuzhiyun }
1923*4882a593Smuzhiyun
1924*4882a593Smuzhiyun /**
1925*4882a593Smuzhiyun * fc_exch_pool_reset() - Reset a per cpu exchange pool
1926*4882a593Smuzhiyun * @lport: The local port that the exchange pool is on
1927*4882a593Smuzhiyun * @pool: The exchange pool to be reset
1928*4882a593Smuzhiyun * @sid: The source ID
1929*4882a593Smuzhiyun * @did: The destination ID
1930*4882a593Smuzhiyun *
1931*4882a593Smuzhiyun * Resets a per cpu exches pool, releasing all of its sequences
1932*4882a593Smuzhiyun * and exchanges. If sid is non-zero then reset only exchanges
1933*4882a593Smuzhiyun * we sourced from the local port's FID. If did is non-zero then
1934*4882a593Smuzhiyun * only reset exchanges destined for the local port's FID.
1935*4882a593Smuzhiyun */
static void fc_exch_pool_reset(struct fc_lport *lport,
			       struct fc_exch_pool *pool,
			       u32 sid, u32 did)
{
	struct fc_exch *ep;
	struct fc_exch *next;

	spin_lock_bh(&pool->lock);
restart:
	list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
		if ((lport == ep->lp) &&
		    (sid == 0 || sid == ep->sid) &&
		    (did == 0 || did == ep->did)) {
			/* Hold the exchange so it survives while we drop
			 * the pool lock to reset it (fc_exch_reset() may
			 * sleep and takes the exchange lock itself).
			 */
			fc_exch_hold(ep);
			spin_unlock_bh(&pool->lock);

			fc_exch_reset(ep);

			fc_exch_release(ep);
			spin_lock_bh(&pool->lock);

			/*
			 * must restart loop in case while lock
			 * was down multiple eps were released.
			 */
			goto restart;
		}
	}
	/* Reset the free-XID hints for the emptied pool. */
	pool->next_index = 0;
	pool->left = FC_XID_UNKNOWN;
	pool->right = FC_XID_UNKNOWN;
	spin_unlock_bh(&pool->lock);
}
1969*4882a593Smuzhiyun
1970*4882a593Smuzhiyun /**
1971*4882a593Smuzhiyun * fc_exch_mgr_reset() - Reset all EMs of a local port
1972*4882a593Smuzhiyun * @lport: The local port whose EMs are to be reset
1973*4882a593Smuzhiyun * @sid: The source ID
1974*4882a593Smuzhiyun * @did: The destination ID
1975*4882a593Smuzhiyun *
1976*4882a593Smuzhiyun * Reset all EMs associated with a given local port. Release all
1977*4882a593Smuzhiyun * sequences and exchanges. If sid is non-zero then reset only the
1978*4882a593Smuzhiyun * exchanges sent from the local port's FID. If did is non-zero then
1979*4882a593Smuzhiyun * reset only exchanges destined for the local port's FID.
1980*4882a593Smuzhiyun */
fc_exch_mgr_reset(struct fc_lport * lport,u32 sid,u32 did)1981*4882a593Smuzhiyun void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
1982*4882a593Smuzhiyun {
1983*4882a593Smuzhiyun struct fc_exch_mgr_anchor *ema;
1984*4882a593Smuzhiyun unsigned int cpu;
1985*4882a593Smuzhiyun
1986*4882a593Smuzhiyun list_for_each_entry(ema, &lport->ema_list, ema_list) {
1987*4882a593Smuzhiyun for_each_possible_cpu(cpu)
1988*4882a593Smuzhiyun fc_exch_pool_reset(lport,
1989*4882a593Smuzhiyun per_cpu_ptr(ema->mp->pool, cpu),
1990*4882a593Smuzhiyun sid, did);
1991*4882a593Smuzhiyun }
1992*4882a593Smuzhiyun }
1993*4882a593Smuzhiyun EXPORT_SYMBOL(fc_exch_mgr_reset);
1994*4882a593Smuzhiyun
1995*4882a593Smuzhiyun /**
1996*4882a593Smuzhiyun * fc_exch_lookup() - find an exchange
1997*4882a593Smuzhiyun * @lport: The local port
1998*4882a593Smuzhiyun * @xid: The exchange ID
1999*4882a593Smuzhiyun *
2000*4882a593Smuzhiyun * Returns exchange pointer with hold for caller, or NULL if not found.
2001*4882a593Smuzhiyun */
fc_exch_lookup(struct fc_lport * lport,u32 xid)2002*4882a593Smuzhiyun static struct fc_exch *fc_exch_lookup(struct fc_lport *lport, u32 xid)
2003*4882a593Smuzhiyun {
2004*4882a593Smuzhiyun struct fc_exch_mgr_anchor *ema;
2005*4882a593Smuzhiyun
2006*4882a593Smuzhiyun list_for_each_entry(ema, &lport->ema_list, ema_list)
2007*4882a593Smuzhiyun if (ema->mp->min_xid <= xid && xid <= ema->mp->max_xid)
2008*4882a593Smuzhiyun return fc_exch_find(ema->mp, xid);
2009*4882a593Smuzhiyun return NULL;
2010*4882a593Smuzhiyun }
2011*4882a593Smuzhiyun
/**
 * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests
 * @rfp: The REC frame, not freed here.
 *
 * Looks up the referenced exchange and replies with an LS_ACC describing
 * its state, or an LS_RJT when the request is malformed or the exchange
 * cannot be matched.
 *
 * Note that the requesting port may be different than the S_ID in the request.
 */
static void fc_exch_els_rec(struct fc_frame *rfp)
{
	struct fc_lport *lport;
	struct fc_frame *fp;
	struct fc_exch *ep;
	struct fc_els_rec *rp;
	struct fc_els_rec_acc *acc;
	enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
	enum fc_els_rjt_explan explan;
	u32 sid;
	u16 xid, rxid, oxid;

	lport = fr_dev(rfp);
	rp = fc_frame_payload_get(rfp, sizeof(*rp));
	explan = ELS_EXPL_INV_LEN;
	if (!rp)
		goto reject;
	sid = ntoh24(rp->rec_s_id);
	rxid = ntohs(rp->rec_rx_id);
	oxid = ntohs(rp->rec_ox_id);

	explan = ELS_EXPL_OXID_RXID;
	/*
	 * If the subject S_ID is our own FID we originated the exchange,
	 * so look it up by OX_ID; otherwise we responded, use RX_ID.
	 */
	if (sid == fc_host_port_id(lport->host))
		xid = oxid;
	else
		xid = rxid;
	if (xid == FC_XID_UNKNOWN) {
		FC_LPORT_DBG(lport,
			     "REC request from %x: invalid rxid %x oxid %x\n",
			     sid, rxid, oxid);
		goto reject;
	}
	ep = fc_exch_lookup(lport, xid);	/* holds a reference on success */
	if (!ep) {
		FC_LPORT_DBG(lport,
			     "REC request from %x: rxid %x oxid %x not found\n",
			     sid, rxid, oxid);
		goto reject;
	}
	FC_EXCH_DBG(ep, "REC request from %x: rxid %x oxid %x\n",
		    sid, rxid, oxid);
	/* Reject if the request's IDs don't match the exchange we found. */
	if (ep->oid != sid || oxid != ep->oxid)
		goto rel;
	if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
		goto rel;
	fp = fc_frame_alloc(lport, sizeof(*acc));
	if (!fp) {
		/* Best effort: no reply is sent when allocation fails. */
		FC_EXCH_DBG(ep, "Drop REC request, out of memory\n");
		goto out;
	}

	/* Build the LS_ACC payload describing the exchange state. */
	acc = fc_frame_payload_get(fp, sizeof(*acc));
	memset(acc, 0, sizeof(*acc));
	acc->reca_cmd = ELS_LS_ACC;
	acc->reca_ox_id = rp->rec_ox_id;
	memcpy(acc->reca_ofid, rp->rec_s_id, 3);
	acc->reca_rx_id = htons(ep->rxid);
	if (ep->sid == ep->oid)
		hton24(acc->reca_rfid, ep->did);
	else
		hton24(acc->reca_rfid, ep->sid);
	acc->reca_fc4value = htonl(ep->seq.rec_data);
	acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
						 ESB_ST_SEQ_INIT |
						 ESB_ST_COMPLETE));
	fc_fill_reply_hdr(fp, rfp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);
out:
	fc_exch_release(ep);
	return;

rel:
	fc_exch_release(ep);
reject:
	fc_seq_ls_rjt(rfp, reason, explan);
}
2094*4882a593Smuzhiyun
2095*4882a593Smuzhiyun /**
2096*4882a593Smuzhiyun * fc_exch_rrq_resp() - Handler for RRQ responses
2097*4882a593Smuzhiyun * @sp: The sequence that the RRQ is on
2098*4882a593Smuzhiyun * @fp: The RRQ frame
2099*4882a593Smuzhiyun * @arg: The exchange that the RRQ is on
2100*4882a593Smuzhiyun *
2101*4882a593Smuzhiyun * TODO: fix error handler.
2102*4882a593Smuzhiyun */
fc_exch_rrq_resp(struct fc_seq * sp,struct fc_frame * fp,void * arg)2103*4882a593Smuzhiyun static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
2104*4882a593Smuzhiyun {
2105*4882a593Smuzhiyun struct fc_exch *aborted_ep = arg;
2106*4882a593Smuzhiyun unsigned int op;
2107*4882a593Smuzhiyun
2108*4882a593Smuzhiyun if (IS_ERR(fp)) {
2109*4882a593Smuzhiyun int err = PTR_ERR(fp);
2110*4882a593Smuzhiyun
2111*4882a593Smuzhiyun if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
2112*4882a593Smuzhiyun goto cleanup;
2113*4882a593Smuzhiyun FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
2114*4882a593Smuzhiyun "frame error %d\n", err);
2115*4882a593Smuzhiyun return;
2116*4882a593Smuzhiyun }
2117*4882a593Smuzhiyun
2118*4882a593Smuzhiyun op = fc_frame_payload_op(fp);
2119*4882a593Smuzhiyun fc_frame_free(fp);
2120*4882a593Smuzhiyun
2121*4882a593Smuzhiyun switch (op) {
2122*4882a593Smuzhiyun case ELS_LS_RJT:
2123*4882a593Smuzhiyun FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
2124*4882a593Smuzhiyun fallthrough;
2125*4882a593Smuzhiyun case ELS_LS_ACC:
2126*4882a593Smuzhiyun goto cleanup;
2127*4882a593Smuzhiyun default:
2128*4882a593Smuzhiyun FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n",
2129*4882a593Smuzhiyun op);
2130*4882a593Smuzhiyun return;
2131*4882a593Smuzhiyun }
2132*4882a593Smuzhiyun
2133*4882a593Smuzhiyun cleanup:
2134*4882a593Smuzhiyun fc_exch_done(&aborted_ep->seq);
2135*4882a593Smuzhiyun /* drop hold for rec qual */
2136*4882a593Smuzhiyun fc_exch_release(aborted_ep);
2137*4882a593Smuzhiyun }
2138*4882a593Smuzhiyun
2139*4882a593Smuzhiyun
2140*4882a593Smuzhiyun /**
2141*4882a593Smuzhiyun * fc_exch_seq_send() - Send a frame using a new exchange and sequence
2142*4882a593Smuzhiyun * @lport: The local port to send the frame on
2143*4882a593Smuzhiyun * @fp: The frame to be sent
2144*4882a593Smuzhiyun * @resp: The response handler for this request
2145*4882a593Smuzhiyun * @destructor: The destructor for the exchange
2146*4882a593Smuzhiyun * @arg: The argument to be passed to the response handler
2147*4882a593Smuzhiyun * @timer_msec: The timeout period for the exchange
2148*4882a593Smuzhiyun *
2149*4882a593Smuzhiyun * The exchange response handler is set in this routine to resp()
2150*4882a593Smuzhiyun * function pointer. It can be called in two scenarios: if a timeout
2151*4882a593Smuzhiyun * occurs or if a response frame is received for the exchange. The
2152*4882a593Smuzhiyun * fc_frame pointer in response handler will also indicate timeout
2153*4882a593Smuzhiyun * as error using IS_ERR related macros.
2154*4882a593Smuzhiyun *
2155*4882a593Smuzhiyun * The exchange destructor handler is also set in this routine.
2156*4882a593Smuzhiyun * The destructor handler is invoked by EM layer when exchange
2157*4882a593Smuzhiyun * is about to free, this can be used by caller to free its
2158*4882a593Smuzhiyun * resources along with exchange free.
2159*4882a593Smuzhiyun *
2160*4882a593Smuzhiyun * The arg is passed back to resp and destructor handler.
2161*4882a593Smuzhiyun *
2162*4882a593Smuzhiyun * The timeout value (in msec) for an exchange is set if non zero
2163*4882a593Smuzhiyun * timer_msec argument is specified. The timer is canceled when
2164*4882a593Smuzhiyun * it fires or when the exchange is done. The exchange timeout handler
2165*4882a593Smuzhiyun * is registered by EM layer.
2166*4882a593Smuzhiyun *
2167*4882a593Smuzhiyun * The frame pointer with some of the header's fields must be
2168*4882a593Smuzhiyun * filled before calling this routine, those fields are:
2169*4882a593Smuzhiyun *
2170*4882a593Smuzhiyun * - routing control
2171*4882a593Smuzhiyun * - FC port did
2172*4882a593Smuzhiyun * - FC port sid
2173*4882a593Smuzhiyun * - FC header type
2174*4882a593Smuzhiyun * - frame control
2175*4882a593Smuzhiyun * - parameter or relative offset
2176*4882a593Smuzhiyun */
fc_exch_seq_send(struct fc_lport * lport,struct fc_frame * fp,void (* resp)(struct fc_seq *,struct fc_frame * fp,void * arg),void (* destructor)(struct fc_seq *,void *),void * arg,u32 timer_msec)2177*4882a593Smuzhiyun struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
2178*4882a593Smuzhiyun struct fc_frame *fp,
2179*4882a593Smuzhiyun void (*resp)(struct fc_seq *,
2180*4882a593Smuzhiyun struct fc_frame *fp,
2181*4882a593Smuzhiyun void *arg),
2182*4882a593Smuzhiyun void (*destructor)(struct fc_seq *, void *),
2183*4882a593Smuzhiyun void *arg, u32 timer_msec)
2184*4882a593Smuzhiyun {
2185*4882a593Smuzhiyun struct fc_exch *ep;
2186*4882a593Smuzhiyun struct fc_seq *sp = NULL;
2187*4882a593Smuzhiyun struct fc_frame_header *fh;
2188*4882a593Smuzhiyun struct fc_fcp_pkt *fsp = NULL;
2189*4882a593Smuzhiyun int rc = 1;
2190*4882a593Smuzhiyun
2191*4882a593Smuzhiyun ep = fc_exch_alloc(lport, fp);
2192*4882a593Smuzhiyun if (!ep) {
2193*4882a593Smuzhiyun fc_frame_free(fp);
2194*4882a593Smuzhiyun return NULL;
2195*4882a593Smuzhiyun }
2196*4882a593Smuzhiyun ep->esb_stat |= ESB_ST_SEQ_INIT;
2197*4882a593Smuzhiyun fh = fc_frame_header_get(fp);
2198*4882a593Smuzhiyun fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
2199*4882a593Smuzhiyun ep->resp = resp;
2200*4882a593Smuzhiyun ep->destructor = destructor;
2201*4882a593Smuzhiyun ep->arg = arg;
2202*4882a593Smuzhiyun ep->r_a_tov = lport->r_a_tov;
2203*4882a593Smuzhiyun ep->lp = lport;
2204*4882a593Smuzhiyun sp = &ep->seq;
2205*4882a593Smuzhiyun
2206*4882a593Smuzhiyun ep->fh_type = fh->fh_type; /* save for possbile timeout handling */
2207*4882a593Smuzhiyun ep->f_ctl = ntoh24(fh->fh_f_ctl);
2208*4882a593Smuzhiyun fc_exch_setup_hdr(ep, fp, ep->f_ctl);
2209*4882a593Smuzhiyun sp->cnt++;
2210*4882a593Smuzhiyun
2211*4882a593Smuzhiyun if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
2212*4882a593Smuzhiyun fsp = fr_fsp(fp);
2213*4882a593Smuzhiyun fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
2214*4882a593Smuzhiyun }
2215*4882a593Smuzhiyun
2216*4882a593Smuzhiyun if (unlikely(lport->tt.frame_send(lport, fp)))
2217*4882a593Smuzhiyun goto err;
2218*4882a593Smuzhiyun
2219*4882a593Smuzhiyun if (timer_msec)
2220*4882a593Smuzhiyun fc_exch_timer_set_locked(ep, timer_msec);
2221*4882a593Smuzhiyun ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
2222*4882a593Smuzhiyun
2223*4882a593Smuzhiyun if (ep->f_ctl & FC_FC_SEQ_INIT)
2224*4882a593Smuzhiyun ep->esb_stat &= ~ESB_ST_SEQ_INIT;
2225*4882a593Smuzhiyun spin_unlock_bh(&ep->ex_lock);
2226*4882a593Smuzhiyun return sp;
2227*4882a593Smuzhiyun err:
2228*4882a593Smuzhiyun if (fsp)
2229*4882a593Smuzhiyun fc_fcp_ddp_done(fsp);
2230*4882a593Smuzhiyun rc = fc_exch_done_locked(ep);
2231*4882a593Smuzhiyun spin_unlock_bh(&ep->ex_lock);
2232*4882a593Smuzhiyun if (!rc)
2233*4882a593Smuzhiyun fc_exch_delete(ep);
2234*4882a593Smuzhiyun return NULL;
2235*4882a593Smuzhiyun }
2236*4882a593Smuzhiyun EXPORT_SYMBOL(fc_exch_seq_send);
2237*4882a593Smuzhiyun
/**
 * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
 * @ep: The exchange to send the RRQ on
 *
 * This tells the remote port to stop blocking the use of
 * the exchange and the seq_cnt range.
 */
static void fc_exch_rrq(struct fc_exch *ep)
{
	struct fc_lport *lport;
	struct fc_els_rrq *rrq;
	struct fc_frame *fp;
	u32 did;

	lport = ep->lp;

	fp = fc_frame_alloc(lport, sizeof(*rrq));
	if (!fp)
		goto retry;

	/* Build the RRQ payload identifying the exchange to reinstate. */
	rrq = fc_frame_payload_get(fp, sizeof(*rrq));
	memset(rrq, 0, sizeof(*rrq));
	rrq->rrq_cmd = ELS_RRQ;
	hton24(rrq->rrq_s_id, ep->sid);
	rrq->rrq_ox_id = htons(ep->oxid);
	rrq->rrq_rx_id = htons(ep->rxid);

	/*
	 * If we were the responder on this exchange, send the RRQ back
	 * toward the originator (held in ep->sid in that case).
	 */
	did = ep->did;
	if (ep->esb_stat & ESB_ST_RESP)
		did = ep->sid;

	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
		       lport->port_id, FC_TYPE_ELS,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
			     lport->e_d_tov))
		return;

retry:
	/*
	 * Send failed: re-arm the exchange timer to retry later, unless
	 * the exchange is already done or being reset, in which case the
	 * recovery-qualifier hold is dropped instead.
	 */
	FC_EXCH_DBG(ep, "exch: RRQ send failed\n");
	spin_lock_bh(&ep->ex_lock);
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
		spin_unlock_bh(&ep->ex_lock);
		/* drop hold for rec qual */
		fc_exch_release(ep);
		return;
	}
	ep->esb_stat |= ESB_ST_REC_QUAL;
	fc_exch_timer_set_locked(ep, ep->r_a_tov);
	spin_unlock_bh(&ep->ex_lock);
}
2290*4882a593Smuzhiyun
/**
 * fc_exch_els_rrq() - Handler for ELS RRQ (Reset Recovery Qualifier) requests
 * @fp: The RRQ frame, not freed here.
 *
 * Looks up the subject exchange, validates the request's OX_ID/RX_ID/S_ID
 * against it, clears its recovery-qualifier state, and answers with
 * LS_ACC, or with LS_RJT on any validation failure.
 */
static void fc_exch_els_rrq(struct fc_frame *fp)
{
	struct fc_lport *lport;
	struct fc_exch *ep = NULL;	/* request or subject exchange */
	struct fc_els_rrq *rp;
	u32 sid;
	u16 xid;
	enum fc_els_rjt_explan explan;

	lport = fr_dev(fp);
	rp = fc_frame_payload_get(fp, sizeof(*rp));
	explan = ELS_EXPL_INV_LEN;
	if (!rp)
		goto reject;

	/*
	 * lookup subject exchange.
	 */
	sid = ntoh24(rp->rrq_s_id);	/* subject source */
	xid = fc_host_port_id(lport->host) == sid ?
			ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
	ep = fc_exch_lookup(lport, xid);
	explan = ELS_EXPL_OXID_RXID;
	if (!ep)
		goto reject;
	spin_lock_bh(&ep->ex_lock);
	FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n",
		    sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id));
	/* Validate the request against the exchange we found. */
	if (ep->oxid != ntohs(rp->rrq_ox_id))
		goto unlock_reject;
	if (ep->rxid != ntohs(rp->rrq_rx_id) &&
	    ep->rxid != FC_XID_UNKNOWN)
		goto unlock_reject;
	explan = ELS_EXPL_SID;
	if (ep->sid != sid)
		goto unlock_reject;

	/*
	 * Clear Recovery Qualifier state, and cancel timer if complete.
	 */
	if (ep->esb_stat & ESB_ST_REC_QUAL) {
		ep->esb_stat &= ~ESB_ST_REC_QUAL;
		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec qual */
	}
	if (ep->esb_stat & ESB_ST_COMPLETE)
		fc_exch_timer_cancel(ep);

	spin_unlock_bh(&ep->ex_lock);

	/*
	 * Send LS_ACC.
	 */
	fc_seq_ls_acc(fp);
	goto out;

unlock_reject:
	spin_unlock_bh(&ep->ex_lock);
reject:
	fc_seq_ls_rjt(fp, ELS_RJT_LOGIC, explan);
out:
	if (ep)
		fc_exch_release(ep);	/* drop hold from fc_exch_find */
}
2358*4882a593Smuzhiyun
2359*4882a593Smuzhiyun /**
2360*4882a593Smuzhiyun * fc_exch_update_stats() - update exches stats to lport
2361*4882a593Smuzhiyun * @lport: The local port to update exchange manager stats
2362*4882a593Smuzhiyun */
fc_exch_update_stats(struct fc_lport * lport)2363*4882a593Smuzhiyun void fc_exch_update_stats(struct fc_lport *lport)
2364*4882a593Smuzhiyun {
2365*4882a593Smuzhiyun struct fc_host_statistics *st;
2366*4882a593Smuzhiyun struct fc_exch_mgr_anchor *ema;
2367*4882a593Smuzhiyun struct fc_exch_mgr *mp;
2368*4882a593Smuzhiyun
2369*4882a593Smuzhiyun st = &lport->host_stats;
2370*4882a593Smuzhiyun
2371*4882a593Smuzhiyun list_for_each_entry(ema, &lport->ema_list, ema_list) {
2372*4882a593Smuzhiyun mp = ema->mp;
2373*4882a593Smuzhiyun st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
2374*4882a593Smuzhiyun st->fc_no_free_exch_xid +=
2375*4882a593Smuzhiyun atomic_read(&mp->stats.no_free_exch_xid);
2376*4882a593Smuzhiyun st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
2377*4882a593Smuzhiyun st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
2378*4882a593Smuzhiyun st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
2379*4882a593Smuzhiyun st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
2380*4882a593Smuzhiyun }
2381*4882a593Smuzhiyun }
2382*4882a593Smuzhiyun EXPORT_SYMBOL(fc_exch_update_stats);
2383*4882a593Smuzhiyun
2384*4882a593Smuzhiyun /**
2385*4882a593Smuzhiyun * fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs
2386*4882a593Smuzhiyun * @lport: The local port to add the exchange manager to
2387*4882a593Smuzhiyun * @mp: The exchange manager to be added to the local port
2388*4882a593Smuzhiyun * @match: The match routine that indicates when this EM should be used
2389*4882a593Smuzhiyun */
fc_exch_mgr_add(struct fc_lport * lport,struct fc_exch_mgr * mp,bool (* match)(struct fc_frame *))2390*4882a593Smuzhiyun struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
2391*4882a593Smuzhiyun struct fc_exch_mgr *mp,
2392*4882a593Smuzhiyun bool (*match)(struct fc_frame *))
2393*4882a593Smuzhiyun {
2394*4882a593Smuzhiyun struct fc_exch_mgr_anchor *ema;
2395*4882a593Smuzhiyun
2396*4882a593Smuzhiyun ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
2397*4882a593Smuzhiyun if (!ema)
2398*4882a593Smuzhiyun return ema;
2399*4882a593Smuzhiyun
2400*4882a593Smuzhiyun ema->mp = mp;
2401*4882a593Smuzhiyun ema->match = match;
2402*4882a593Smuzhiyun /* add EM anchor to EM anchors list */
2403*4882a593Smuzhiyun list_add_tail(&ema->ema_list, &lport->ema_list);
2404*4882a593Smuzhiyun kref_get(&mp->kref);
2405*4882a593Smuzhiyun return ema;
2406*4882a593Smuzhiyun }
2407*4882a593Smuzhiyun EXPORT_SYMBOL(fc_exch_mgr_add);
2408*4882a593Smuzhiyun
2409*4882a593Smuzhiyun /**
2410*4882a593Smuzhiyun * fc_exch_mgr_destroy() - Destroy an exchange manager
2411*4882a593Smuzhiyun * @kref: The reference to the EM to be destroyed
2412*4882a593Smuzhiyun */
fc_exch_mgr_destroy(struct kref * kref)2413*4882a593Smuzhiyun static void fc_exch_mgr_destroy(struct kref *kref)
2414*4882a593Smuzhiyun {
2415*4882a593Smuzhiyun struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
2416*4882a593Smuzhiyun
2417*4882a593Smuzhiyun mempool_destroy(mp->ep_pool);
2418*4882a593Smuzhiyun free_percpu(mp->pool);
2419*4882a593Smuzhiyun kfree(mp);
2420*4882a593Smuzhiyun }
2421*4882a593Smuzhiyun
2422*4882a593Smuzhiyun /**
2423*4882a593Smuzhiyun * fc_exch_mgr_del() - Delete an EM from a local port's list
2424*4882a593Smuzhiyun * @ema: The exchange manager anchor identifying the EM to be deleted
2425*4882a593Smuzhiyun */
fc_exch_mgr_del(struct fc_exch_mgr_anchor * ema)2426*4882a593Smuzhiyun void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
2427*4882a593Smuzhiyun {
2428*4882a593Smuzhiyun /* remove EM anchor from EM anchors list */
2429*4882a593Smuzhiyun list_del(&ema->ema_list);
2430*4882a593Smuzhiyun kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
2431*4882a593Smuzhiyun kfree(ema);
2432*4882a593Smuzhiyun }
2433*4882a593Smuzhiyun EXPORT_SYMBOL(fc_exch_mgr_del);
2434*4882a593Smuzhiyun
2435*4882a593Smuzhiyun /**
2436*4882a593Smuzhiyun * fc_exch_mgr_list_clone() - Share all exchange manager objects
2437*4882a593Smuzhiyun * @src: Source lport to clone exchange managers from
2438*4882a593Smuzhiyun * @dst: New lport that takes references to all the exchange managers
2439*4882a593Smuzhiyun */
fc_exch_mgr_list_clone(struct fc_lport * src,struct fc_lport * dst)2440*4882a593Smuzhiyun int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst)
2441*4882a593Smuzhiyun {
2442*4882a593Smuzhiyun struct fc_exch_mgr_anchor *ema, *tmp;
2443*4882a593Smuzhiyun
2444*4882a593Smuzhiyun list_for_each_entry(ema, &src->ema_list, ema_list) {
2445*4882a593Smuzhiyun if (!fc_exch_mgr_add(dst, ema->mp, ema->match))
2446*4882a593Smuzhiyun goto err;
2447*4882a593Smuzhiyun }
2448*4882a593Smuzhiyun return 0;
2449*4882a593Smuzhiyun err:
2450*4882a593Smuzhiyun list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list)
2451*4882a593Smuzhiyun fc_exch_mgr_del(ema);
2452*4882a593Smuzhiyun return -ENOMEM;
2453*4882a593Smuzhiyun }
2454*4882a593Smuzhiyun EXPORT_SYMBOL(fc_exch_mgr_list_clone);
2455*4882a593Smuzhiyun
/**
 * fc_exch_mgr_alloc() - Allocate an exchange manager
 * @lport: The local port that the new EM will be associated with
 * @class: The default FC class for new exchanges
 * @min_xid: The minimum XID for exchanges from the new EM
 * @max_xid: The maximum XID for exchanges from the new EM
 * @match: The match routine for the new EM
 *
 * Allocates an EM with per-CPU exchange pools covering the XID range,
 * anchors it on @lport, and returns it with the anchor holding the only
 * reference. Returns NULL on invalid range or allocation failure.
 */
struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
				      enum fc_class class,
				      u16 min_xid, u16 max_xid,
				      bool (*match)(struct fc_frame *))
{
	struct fc_exch_mgr *mp;
	u16 pool_exch_range;
	size_t pool_size;
	unsigned int cpu;
	struct fc_exch_pool *pool;

	/* min_xid must be aligned to the per-CPU XID striping mask. */
	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
	    (min_xid & fc_cpu_mask) != 0) {
		FC_LPORT_DBG(lport, "Invalid min_xid 0x:%x and max_xid 0x:%x\n",
			     min_xid, max_xid);
		return NULL;
	}

	/*
	 * allocate memory for EM
	 */
	mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
	if (!mp)
		return NULL;

	mp->class = class;
	mp->lport = lport;
	/* adjust em exch xid range for offload */
	mp->min_xid = min_xid;

	/* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */
	pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
		sizeof(struct fc_exch *);
	if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
		/* Requested range too big for per-CPU units: clamp max_xid. */
		mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
			min_xid - 1;
	} else {
		mp->max_xid = max_xid;
		pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
			(fc_cpu_mask + 1);
	}

	mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
	if (!mp->ep_pool)
		goto free_mp;

	/*
	 * Setup per cpu exch pool with entire exchange id range equally
	 * divided across all cpus. The exch pointers array memory is
	 * allocated for exch range per pool.
	 */
	mp->pool_max_index = pool_exch_range - 1;

	/*
	 * Allocate and initialize per cpu exch pool
	 */
	pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
	mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
	if (!mp->pool)
		goto free_mempool;
	for_each_possible_cpu(cpu) {
		pool = per_cpu_ptr(mp->pool, cpu);
		pool->next_index = 0;
		pool->left = FC_XID_UNKNOWN;
		pool->right = FC_XID_UNKNOWN;
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->ex_list);
	}

	kref_init(&mp->kref);
	if (!fc_exch_mgr_add(lport, mp, match)) {
		free_percpu(mp->pool);
		goto free_mempool;
	}

	/*
	 * Above kref_init() sets mp->kref to 1 and then
	 * call to fc_exch_mgr_add incremented mp->kref again,
	 * so adjust that extra increment.
	 */
	kref_put(&mp->kref, fc_exch_mgr_destroy);
	return mp;

free_mempool:
	mempool_destroy(mp->ep_pool);
free_mp:
	kfree(mp);
	return NULL;
}
EXPORT_SYMBOL(fc_exch_mgr_alloc);
2554*4882a593Smuzhiyun
2555*4882a593Smuzhiyun /**
2556*4882a593Smuzhiyun * fc_exch_mgr_free() - Free all exchange managers on a local port
2557*4882a593Smuzhiyun * @lport: The local port whose EMs are to be freed
2558*4882a593Smuzhiyun */
fc_exch_mgr_free(struct fc_lport * lport)2559*4882a593Smuzhiyun void fc_exch_mgr_free(struct fc_lport *lport)
2560*4882a593Smuzhiyun {
2561*4882a593Smuzhiyun struct fc_exch_mgr_anchor *ema, *next;
2562*4882a593Smuzhiyun
2563*4882a593Smuzhiyun flush_workqueue(fc_exch_workqueue);
2564*4882a593Smuzhiyun list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
2565*4882a593Smuzhiyun fc_exch_mgr_del(ema);
2566*4882a593Smuzhiyun }
2567*4882a593Smuzhiyun EXPORT_SYMBOL(fc_exch_mgr_free);
2568*4882a593Smuzhiyun
2569*4882a593Smuzhiyun /**
2570*4882a593Smuzhiyun * fc_find_ema() - Lookup and return appropriate Exchange Manager Anchor depending
2571*4882a593Smuzhiyun * upon 'xid'.
2572*4882a593Smuzhiyun * @f_ctl: f_ctl
2573*4882a593Smuzhiyun * @lport: The local port the frame was received on
2574*4882a593Smuzhiyun * @fh: The received frame header
2575*4882a593Smuzhiyun */
fc_find_ema(u32 f_ctl,struct fc_lport * lport,struct fc_frame_header * fh)2576*4882a593Smuzhiyun static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
2577*4882a593Smuzhiyun struct fc_lport *lport,
2578*4882a593Smuzhiyun struct fc_frame_header *fh)
2579*4882a593Smuzhiyun {
2580*4882a593Smuzhiyun struct fc_exch_mgr_anchor *ema;
2581*4882a593Smuzhiyun u16 xid;
2582*4882a593Smuzhiyun
2583*4882a593Smuzhiyun if (f_ctl & FC_FC_EX_CTX)
2584*4882a593Smuzhiyun xid = ntohs(fh->fh_ox_id);
2585*4882a593Smuzhiyun else {
2586*4882a593Smuzhiyun xid = ntohs(fh->fh_rx_id);
2587*4882a593Smuzhiyun if (xid == FC_XID_UNKNOWN)
2588*4882a593Smuzhiyun return list_entry(lport->ema_list.prev,
2589*4882a593Smuzhiyun typeof(*ema), ema_list);
2590*4882a593Smuzhiyun }
2591*4882a593Smuzhiyun
2592*4882a593Smuzhiyun list_for_each_entry(ema, &lport->ema_list, ema_list) {
2593*4882a593Smuzhiyun if ((xid >= ema->mp->min_xid) &&
2594*4882a593Smuzhiyun (xid <= ema->mp->max_xid))
2595*4882a593Smuzhiyun return ema;
2596*4882a593Smuzhiyun }
2597*4882a593Smuzhiyun return NULL;
2598*4882a593Smuzhiyun }
2599*4882a593Smuzhiyun /**
2600*4882a593Smuzhiyun * fc_exch_recv() - Handler for received frames
2601*4882a593Smuzhiyun * @lport: The local port the frame was received on
2602*4882a593Smuzhiyun * @fp: The received frame
2603*4882a593Smuzhiyun */
fc_exch_recv(struct fc_lport * lport,struct fc_frame * fp)2604*4882a593Smuzhiyun void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
2605*4882a593Smuzhiyun {
2606*4882a593Smuzhiyun struct fc_frame_header *fh = fc_frame_header_get(fp);
2607*4882a593Smuzhiyun struct fc_exch_mgr_anchor *ema;
2608*4882a593Smuzhiyun u32 f_ctl;
2609*4882a593Smuzhiyun
2610*4882a593Smuzhiyun /* lport lock ? */
2611*4882a593Smuzhiyun if (!lport || lport->state == LPORT_ST_DISABLED) {
2612*4882a593Smuzhiyun FC_LIBFC_DBG("Receiving frames for an lport that "
2613*4882a593Smuzhiyun "has not been initialized correctly\n");
2614*4882a593Smuzhiyun fc_frame_free(fp);
2615*4882a593Smuzhiyun return;
2616*4882a593Smuzhiyun }
2617*4882a593Smuzhiyun
2618*4882a593Smuzhiyun f_ctl = ntoh24(fh->fh_f_ctl);
2619*4882a593Smuzhiyun ema = fc_find_ema(f_ctl, lport, fh);
2620*4882a593Smuzhiyun if (!ema) {
2621*4882a593Smuzhiyun FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor,"
2622*4882a593Smuzhiyun "fc_ctl <0x%x>, xid <0x%x>\n",
2623*4882a593Smuzhiyun f_ctl,
2624*4882a593Smuzhiyun (f_ctl & FC_FC_EX_CTX) ?
2625*4882a593Smuzhiyun ntohs(fh->fh_ox_id) :
2626*4882a593Smuzhiyun ntohs(fh->fh_rx_id));
2627*4882a593Smuzhiyun fc_frame_free(fp);
2628*4882a593Smuzhiyun return;
2629*4882a593Smuzhiyun }
2630*4882a593Smuzhiyun
2631*4882a593Smuzhiyun /*
2632*4882a593Smuzhiyun * If frame is marked invalid, just drop it.
2633*4882a593Smuzhiyun */
2634*4882a593Smuzhiyun switch (fr_eof(fp)) {
2635*4882a593Smuzhiyun case FC_EOF_T:
2636*4882a593Smuzhiyun if (f_ctl & FC_FC_END_SEQ)
2637*4882a593Smuzhiyun skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
2638*4882a593Smuzhiyun fallthrough;
2639*4882a593Smuzhiyun case FC_EOF_N:
2640*4882a593Smuzhiyun if (fh->fh_type == FC_TYPE_BLS)
2641*4882a593Smuzhiyun fc_exch_recv_bls(ema->mp, fp);
2642*4882a593Smuzhiyun else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
2643*4882a593Smuzhiyun FC_FC_EX_CTX)
2644*4882a593Smuzhiyun fc_exch_recv_seq_resp(ema->mp, fp);
2645*4882a593Smuzhiyun else if (f_ctl & FC_FC_SEQ_CTX)
2646*4882a593Smuzhiyun fc_exch_recv_resp(ema->mp, fp);
2647*4882a593Smuzhiyun else /* no EX_CTX and no SEQ_CTX */
2648*4882a593Smuzhiyun fc_exch_recv_req(lport, ema->mp, fp);
2649*4882a593Smuzhiyun break;
2650*4882a593Smuzhiyun default:
2651*4882a593Smuzhiyun FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)",
2652*4882a593Smuzhiyun fr_eof(fp));
2653*4882a593Smuzhiyun fc_frame_free(fp);
2654*4882a593Smuzhiyun }
2655*4882a593Smuzhiyun }
2656*4882a593Smuzhiyun EXPORT_SYMBOL(fc_exch_recv);
2657*4882a593Smuzhiyun
/**
 * fc_exch_init() - Initialize the exchange layer for a local port
 * @lport: The local port to initialize the exchange layer for
 *
 * Return: 0 (always succeeds)
 */
int fc_exch_init(struct fc_lport *lport)
{
	/* Install the default reset handler only if the LLD did not
	 * already supply its own in the transport template.
	 */
	if (!lport->tt.exch_mgr_reset)
		lport->tt.exch_mgr_reset = fc_exch_mgr_reset;

	return 0;
}
EXPORT_SYMBOL(fc_exch_init);
2670*4882a593Smuzhiyun
2671*4882a593Smuzhiyun /**
2672*4882a593Smuzhiyun * fc_setup_exch_mgr() - Setup an exchange manager
2673*4882a593Smuzhiyun */
fc_setup_exch_mgr(void)2674*4882a593Smuzhiyun int fc_setup_exch_mgr(void)
2675*4882a593Smuzhiyun {
2676*4882a593Smuzhiyun fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
2677*4882a593Smuzhiyun 0, SLAB_HWCACHE_ALIGN, NULL);
2678*4882a593Smuzhiyun if (!fc_em_cachep)
2679*4882a593Smuzhiyun return -ENOMEM;
2680*4882a593Smuzhiyun
2681*4882a593Smuzhiyun /*
2682*4882a593Smuzhiyun * Initialize fc_cpu_mask and fc_cpu_order. The
2683*4882a593Smuzhiyun * fc_cpu_mask is set for nr_cpu_ids rounded up
2684*4882a593Smuzhiyun * to order of 2's * power and order is stored
2685*4882a593Smuzhiyun * in fc_cpu_order as this is later required in
2686*4882a593Smuzhiyun * mapping between an exch id and exch array index
2687*4882a593Smuzhiyun * in per cpu exch pool.
2688*4882a593Smuzhiyun *
2689*4882a593Smuzhiyun * This round up is required to align fc_cpu_mask
2690*4882a593Smuzhiyun * to exchange id's lower bits such that all incoming
2691*4882a593Smuzhiyun * frames of an exchange gets delivered to the same
2692*4882a593Smuzhiyun * cpu on which exchange originated by simple bitwise
2693*4882a593Smuzhiyun * AND operation between fc_cpu_mask and exchange id.
2694*4882a593Smuzhiyun */
2695*4882a593Smuzhiyun fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
2696*4882a593Smuzhiyun fc_cpu_mask = (1 << fc_cpu_order) - 1;
2697*4882a593Smuzhiyun
2698*4882a593Smuzhiyun fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
2699*4882a593Smuzhiyun if (!fc_exch_workqueue)
2700*4882a593Smuzhiyun goto err;
2701*4882a593Smuzhiyun return 0;
2702*4882a593Smuzhiyun err:
2703*4882a593Smuzhiyun kmem_cache_destroy(fc_em_cachep);
2704*4882a593Smuzhiyun return -ENOMEM;
2705*4882a593Smuzhiyun }
2706*4882a593Smuzhiyun
/**
 * fc_destroy_exch_mgr() - Destroy an exchange manager
 *
 * Tears down the module-wide resources created by fc_setup_exch_mgr():
 * the exchange workqueue (drained before destruction by
 * destroy_workqueue()) and the exchange slab cache.
 */
void fc_destroy_exch_mgr(void)
{
	destroy_workqueue(fc_exch_workqueue);
	kmem_cache_destroy(fc_em_cachep);
}
2715