/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */

#include "qla_target.h"
/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: HA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

/*
 * qla2x00_debounce_register
 *      Debounce register.
 *
 * Input:
 *      port = register address.
 *
 * Returns:
 *      register value.
 */
static __inline__ uint16_t
qla2x00_debounce_register(volatile __le16 __iomem *addr)
{
	volatile uint16_t first;
	volatile uint16_t second;

	do {
		first = rd_reg_word(addr);
		barrier();
		cpu_relax();
		second = rd_reg_word(addr);
	} while (first != second);

	return (first);
}

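/*
 * qla2x00_poll - Service the response queue by invoking the interrupt
 * handler directly (the qla82xx-specific poll routine on P3P parts).
 */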
static inline void
qla2x00_poll(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;

	if (IS_P3P_TYPE(ha))
		qla82xx_poll(0, rsp);
	else
		ha->isp_ops->intr_handler(0, rsp);
}

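/*
 * host_to_fcp_swap - Byte-swap @fcp in place, one 32-bit word at a time;
 * trailing bytes beyond a multiple of four are left untouched.
 */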
static inline uint8_t *
host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
{
	uint32_t *ifcp = (uint32_t *) fcp;
	uint32_t *ofcp = (uint32_t *) fcp;
	uint32_t iter = bsize >> 2;

	for (; iter ; iter--)
		*ofcp++ = swab32(*ifcp++);

	return fcp;
}

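/*
 * host_to_adap - Copy @bsize bytes from @src to @dst, converting each
 * 32-bit word to the adapter's little-endian format.
 */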
static inline void
host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
{
	uint32_t *isrc = (uint32_t *) src;
	__le32 *odest = (__le32 *) dst;
	uint32_t iter = bsize >> 2;

	for ( ; iter--; isrc++)
		*odest++ = cpu_to_le32(*isrc);
}

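/*
 * qla2x00_clean_dsd_pool - Return every DSD entry on @ctx->dsd_list to the
 * DMA pool and reinitialize the list.
 */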
static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx)
{
	struct dsd_dma *dsd, *tdsd;

	/* clean up allocated prev pool */
	list_for_each_entry_safe(dsd, tdsd, &ctx->dsd_list, list) {
		dma_pool_free(ha->dl_dma_pool, dsd->dsd_addr,
		    dsd->dsd_list_dma);
		list_del(&dsd->list);
		kfree(dsd);
	}
	INIT_LIST_HEAD(&ctx->dsd_list);
}

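/*
 * qla2x00_set_fcport_disc_state - Record the new discovery state and shift
 * the previous one into the shadow_disc_state history (4 bits per state),
 * logging the transition.
 */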
static inline void
qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
{
	int old_val;
	uint8_t shiftbits, mask;

	/* This will have to change when the max no. of states > 16 */
	shiftbits = 4;
	mask = (1 << shiftbits) - 1;

	fcport->disc_state = state;
	while (1) {
		old_val = atomic_read(&fcport->shadow_disc_state);
		if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state,
		    old_val, (old_val << shiftbits) | state)) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
			    "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
			    fcport->port_name, port_dstate_str[old_val & mask],
			    port_dstate_str[state], fcport->d_id.b24);
			return;
		}
	}
}

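/*
 * qla2x00_hba_err_chk_enabled - Decide whether HBA-side protection (T10 DIF)
 * error checking is enabled for this command, based on its protection
 * operation and the ql2xenablehba_err_chk module parameter.
 */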
static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
	/*
	 * Uncomment when corresponding SCSI changes are done.
	 *
	if (!sp->cmd->prot_chk)
		return 0;
	 *
	 */
	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		return 1;
	}
	return 0;
}

static inline int
qla2x00_reset_active(scsi_qla_host_t *vha)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	/* Test appropriate base-vha and vha flags. */
	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}

static inline int
qla2x00_chip_is_down(scsi_qla_host_t *vha)
{
	return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started);
}

static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha,
			    struct qla_qpair *qpair, fc_port_t *fcport)
{
	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->iocbs = 1;
	sp->vha = vha;
	sp->qpair = qpair;
	sp->cmd_type = TYPE_SRB;
	INIT_LIST_HEAD(&sp->elem);
}

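/*
 * qla2xxx_get_qpair_sp - Allocate an SRB from the queue pair's mempool and
 * initialize it.  The qpair is marked busy for the lifetime of the SRB;
 * returns NULL if the qpair cannot be marked busy or the allocation fails.
 */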
static inline srb_t *
qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
    fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	uint8_t bail;

	QLA_QPAIR_MARK_BUSY(qpair, bail);
	if (unlikely(bail))
		return NULL;

	sp = mempool_alloc(qpair->srb_mempool, flag);
	if (sp)
		qla2xxx_init_sp(sp, vha, qpair, fcport);
	else
		QLA_QPAIR_MARK_NOT_BUSY(qpair);
	return sp;
}

void qla2xxx_rel_done_warning(srb_t *sp, int res);
void qla2xxx_rel_free_warning(srb_t *sp);

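/*
 * qla2xxx_rel_qpair_sp - Return an SRB to the queue pair's mempool and drop
 * the qpair busy mark.  The done/free callbacks are pointed at warning
 * stubs to catch use of the SRB after it has been released.
 */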
static inline void
qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
{
	sp->qpair = NULL;
	sp->done = qla2xxx_rel_done_warning;
	sp->free = qla2xxx_rel_free_warning;
	mempool_free(sp, qpair->srb_mempool);
	QLA_QPAIR_MARK_NOT_BUSY(qpair);
}

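/*
 * qla2x00_get_sp - Allocate an SRB on the host's base queue pair, taking a
 * busy reference on @vha.  The reference is dropped and NULL returned if
 * the allocation fails.
 */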
static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	uint8_t bail;
	struct qla_qpair *qpair;

	QLA_VHA_MARK_BUSY(vha, bail);
	if (unlikely(bail))
		return NULL;

	qpair = vha->hw->base_qpair;
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag);
	if (!sp)
		goto done;

	sp->vha = vha;
done:
	if (!sp)
		QLA_VHA_MARK_NOT_BUSY(vha);
	return sp;
}

static inline void
qla2x00_rel_sp(srb_t *sp)
{
	QLA_VHA_MARK_NOT_BUSY(sp->vha);
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

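/*
 * qla2x00_gid_list_size - Size of the Get ID list buffer: a fixed 32
 * 32-bit entries on ISPFX00, otherwise one gid_list_info entry per
 * supported fibre device.
 */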
static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
	if (IS_QLAFX00(ha))
		return sizeof(uint32_t) * 32;
	else
		return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}

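/*
 * qla2x00_handle_mbx_completion - If a mailbox command is waiting on
 * interrupt completion and @status carries MBX_INTERRUPT, latch the
 * interrupt flag and complete mbx_intr_comp to wake the waiter.
 */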
static inline void
qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
{
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
}

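/*
 * qla2x00_set_retry_delay_timestamp - Process the FCP status qualifier
 * (SAM-6 rev5 5.3.2).  For an I-T nexus scope (1 or 2) with a valid
 * qualifier, arm fcport->retry_delay_timestamp so that new I/O is held off
 * for the requested number of 100 ms increments, capped at 60 seconds.
 */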
static inline void
qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t sts_qual)
{
	u8 scope;
	u16 qual;
#define SQ_SCOPE_MASK		0xc000 /* SAM-6 rev5 5.3.2 */
#define SQ_SCOPE_SHIFT		14
#define SQ_QUAL_MASK		0x3fff

#define SQ_MAX_WAIT_SEC		60 /* Max I/O hold off time in seconds. */
#define SQ_MAX_WAIT_TIME	(SQ_MAX_WAIT_SEC * 10) /* in 100ms. */

	if (!sts_qual) /* Common case. */
		return;

	scope = (sts_qual & SQ_SCOPE_MASK) >> SQ_SCOPE_SHIFT;
	/* Handle only scope 1 or 2, which is for I-T nexus. */
	if (scope != 1 && scope != 2)
		return;

	/* Skip processing, if retry delay timer is already in effect. */
	if (fcport->retry_delay_timestamp &&
	    time_before(jiffies, fcport->retry_delay_timestamp))
		return;

	qual = sts_qual & SQ_QUAL_MASK;
	if (qual < 1 || qual > 0x3fef)
		return;
	qual = min(qual, (u16)SQ_MAX_WAIT_TIME);

	/* qual is expressed in 100ms increments. */
	fcport->retry_delay_timestamp = jiffies + (qual * HZ / 10);

	ql_log(ql_log_warn, fcport->vha, 0x5101,
	       "%8phC: I/O throttling requested (status qualifier = %04xh), holding off I/Os for %ums.\n",
	       fcport->port_name, sts_qual, qual * 100);
}

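/*
 * qla_is_exch_offld_enabled - True when the exchange count configured for
 * the active mode (initiator, target, or both in dual mode) exceeds the
 * firmware default exchange count (FW_DEF_EXCHANGES_CNT).
 */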
static inline bool
qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
{
	if (qla_ini_mode_enabled(vha) &&
	    (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
		return true;
	else if (qla_tgt_mode_enabled(vha) &&
	    (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
		return true;
	else if (qla_dual_mode_enabled(vha) &&
	    ((vha->ql2xiniexchg + vha->ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
		return true;
	else
		return false;
}

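/*
 * qla_cpu_update - Record the CPU now servicing this queue pair and
 * propagate it to every registered qpair hint.
 */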
static inline void
qla_cpu_update(struct qla_qpair *qpair, uint16_t cpuid)
{
	qpair->cpuid = cpuid;

	if (!list_empty(&qpair->hints_list)) {
		struct qla_qpair_hint *h;

		list_for_each_entry(h, &qpair->hints_list, hint_elem)
			h->cpuid = qpair->cpuid;
	}
}

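/*
 * qla_qpair_to_hint - Look up the target-mode hint associated with @qpair
 * in the target's qphints array; returns NULL if none matches.
 */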
static inline struct qla_qpair_hint *
qla_qpair_to_hint(struct qla_tgt *tgt, struct qla_qpair *qpair)
{
	struct qla_qpair_hint *h;
	u16 i;

	for (i = 0; i < tgt->ha->max_qpairs + 1; i++) {
		h = &tgt->qphints[i];
		if (h->qpair == qpair)
			return h;
	}

	return NULL;
}

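/*
 * qla_83xx_start_iocbs - Advance the request ring index (wrapping at the
 * end of the ring) and write it to the in-pointer register so the ISP
 * starts processing the queued IOCBs.
 */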
static inline void
qla_83xx_start_iocbs(struct qla_qpair *qpair)
{
	struct req_que *req = qpair->req;

	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	wrt_reg_dword(req->req_q_in, req->ring_index);
}

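/*
 * qla2xxx_get_fc4_priority - Read the dual FCP/NVMe flag byte from NVRAM
 * and return FC4_PRIORITY_FCP if bit 6 is set, FC4_PRIORITY_NVME otherwise.
 */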
static inline int
qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
{
	uint32_t data;

	data = ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET];

	return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
}

enum {
	RESOURCE_NONE,
	RESOURCE_INI,
};

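/*
 * qla_get_iocbs - Reserve IOCB slots for a request when the
 * ql2xenforce_iocb_limit module parameter is set.  The request is accepted
 * if the queue pair is below its own limit, or if a rough (unlocked) sum
 * across all queue pairs is still below the global firmware limit; returns
 * -ENOSPC otherwise.
 */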
static inline int
qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
{
	u16 iocbs_used, i;
	struct qla_hw_data *ha = qp->vha->hw;

	if (!ql2xenforce_iocb_limit) {
		iores->res_type = RESOURCE_NONE;
		return 0;
	}

	if ((iores->iocb_cnt + qp->fwres.iocbs_used) < qp->fwres.iocbs_qp_limit) {
		qp->fwres.iocbs_used += iores->iocb_cnt;
		return 0;
	} else {
		/* no need to acquire qpair lock. It's just rough calculation */
		iocbs_used = ha->base_qpair->fwres.iocbs_used;
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i])
				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
		}

		if ((iores->iocb_cnt + iocbs_used) < qp->fwres.iocbs_limit) {
			qp->fwres.iocbs_used += iores->iocb_cnt;
			return 0;
		} else {
			iores->res_type = RESOURCE_NONE;
			return -ENOSPC;
		}
	}
}

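/*
 * qla_put_iocbs - Release IOCB slots reserved by qla_get_iocbs() and clear
 * the reservation type; the usage counter is clamped at zero if it would
 * otherwise underflow.
 */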
static inline void
qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
{
	switch (iores->res_type) {
	case RESOURCE_NONE:
		break;
	default:
		if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
			qp->fwres.iocbs_used -= iores->iocb_cnt;
		} else {
			/* should not happen */
			qp->fwres.iocbs_used = 0;
		}
		break;
	}
	iores->res_type = RESOURCE_NONE;
}