// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

static struct class *cxlflash_class;
static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);

/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioasa *ioasa;
	u32 resid;

	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not, we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * For SCSI commands this routine prepares and submits commands that have
 * either completed or timed out to the SCSI stack. For internal commands
 * (TMF or AFU), this routine simply notifies the originator that the
 * command has completed.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	list_del(&cmd->list);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);
		scp->scsi_done(scp);
	} else if (cmd->cmd_tmf) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		wake_up_all_locked(&cfg->tmf_waitq);
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
	} else
		complete(&cmd->cevent);
}

/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq:	Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */
static void flush_pending_cmds(struct hwq *hwq)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct afu_cmd *cmd, *tmp;
	struct scsi_cmnd *scp;
	ulong lock_flags;

	list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
		/* Bypass command when on a doneq, cmd_complete() will handle */
		if (!list_empty(&cmd->queue))
			continue;

		list_del(&cmd->list);

		if (cmd->scp) {
			scp = cmd->scp;
			scp->result = (DID_IMM_RETRY << 16);
			scp->scsi_done(scp);
		} else {
			cmd->cmd_aborted = true;

			if (cmd->cmd_tmf) {
				spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
				cfg->tmf_active = false;
				wake_up_all_locked(&cfg->tmf_waitq);
				spin_unlock_irqrestore(&cfg->tmf_slock,
						       lock_flags);
			} else
				complete(&cmd->cevent);
		}
	}
}

/**
 * context_reset() - reset context via specified register
 * @hwq:	Hardware queue owning the context to be reset.
 * @reset_reg:	MMIO register to perform reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = -ETIMEDOUT;
	int nretry = 0;
	u64 val = 0x1;
	ulong lock_flags;

	dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

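	/* Request the reset, then poll below until the reset bit reads back clear */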
	writeq_be(val, reset_reg);
	do {
		val = readq_be(reset_reg);
		if ((val & 0x1) == 0x0) {
			rc = 0;
			break;
		}

		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	if (!rc)
		flush_pending_cmds(hwq);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
		__func__, rc, val, nretry);
	return rc;
}

/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_ioarrin(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 * @hwq:	Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_sq(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	if (--hwq->room < 0) {
		room = readq_be(&hwq->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			hwq->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		hwq->room = room - 1;
	}

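	/* Track the command as pending and post its RCB address to IOARRIN */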
	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
		__func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
	return rc;
}

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&hwq->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

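	/* Copy the RCB into the current SQ slot, wrapping at the end of the ring */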
	*hwq->hsq_curr = cmd->rcb;
	if (hwq->hsq_curr < hwq->hsq_end)
		hwq->hsq_curr++;
	else
		hwq->hsq_curr = hwq->hsq_start;

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
	       "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
	       cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
	       readq_be(&hwq->host_map->sq_head),
	       readq_be(&hwq->host_map->sq_tail));
	return rc;
}

/**
 * wait_resp() - waits for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		rc = -ETIMEDOUT;

	if (cmd->cmd_aborted)
		rc = -EAGAIN;

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -EIO;
	}

	return rc;
}

/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 * @afu:	AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
			     struct afu *afu)
{
	u32 tag;
	u32 hwq = 0;

	if (afu->num_hwqs == 1)
		return 0;

	switch (afu->hwq_mode) {
	case HWQ_MODE_RR:
		hwq = afu->hwq_rr_count++ % afu->num_hwqs;
		break;
	case HWQ_MODE_TAG:
		tag = blk_mq_unique_tag(scp->request);
		hwq = blk_mq_unique_tag_to_hwq(tag);
		break;
	case HWQ_MODE_CPU:
		hwq = smp_processor_id() % afu->num_hwqs;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	return hwq;
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @cfg:	Internal structure associated with the host.
 * @sdev:	SCSI device destined for TMF.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
 */
static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
		    u64 tmfcmd)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd = NULL;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	bool needs_deletion = false;
	char *buf = NULL;
	ulong lock_flags;
	int rc = 0;
	ulong to;

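	/*
	 * Over-allocate so the command can be placed on an __alignof__(*cmd)
	 * boundary via PTR_ALIGN() below.
	 */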
	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
	INIT_LIST_HEAD(&cmd->queue);

	/* When a Task Management Function is active, do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->parent = afu;
	cmd->cmd_tmf = true;
	cmd->hwq_index = hwq->index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
	cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -ETIMEDOUT;
		needs_deletion = true;
	} else if (cmd->cmd_aborted) {
		dev_err(dev, "%s: TMF aborted\n", __func__);
		rc = -EAGAIN;
	} else if (cmd->sa.ioasc) {
		dev_err(dev, "%s: TMF failed ioasc=%08x\n",
			__func__, cmd->sa.ioasc);
		rc = -EIO;
	}
	cfg->tmf_active = false;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	if (needs_deletion) {
		spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
		list_del(&cmd->list);
		spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	}
out:
	kfree(buf);
	return rc;
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afuci(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_PROBING:
	case STATE_PROBED:
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
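		/* Count in-flight commands so stop_afu() can wait for them to drain */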
		atomic_inc(&afu->cmds_active);
		break;
	}

	if (likely(sg)) {
		cmd->rcb.data_len = sg->length;
		cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->hwq_index = hwq_index;

	cmd->sa.ioasc = 0;
	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
	atomic_dec(&afu->cmds_active);
out:
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
	if (cfg->async_reset_cookie == 0)
		return;

	/* Wait until all async calls prior to this cookie have completed */
	async_synchronize_cookie(cfg->async_reset_cookie + 1);
	cfg->async_reset_cookie = 0;
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	int i;

	cancel_work_sync(&cfg->work_q);
	if (!current_is_async())
		cxlflash_reset_sync(cfg);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);

		if (afu_is_irqpoll_enabled(afu)) {
			for (i = 0; i < afu->num_hwqs; i++) {
				hwq = get_hwq(afu, i);

				irq_poll_disable(&hwq->irqpoll);
			}
		}

		if (likely(afu->afu_map)) {
			cfg->ops->psa_unmap(afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
		      u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
		fallthrough;
	case UNMAP_TWO:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
		fallthrough;
	case UNMAP_ONE:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
		fallthrough;
	case FREE_IRQ:
		cfg->ops->free_afu_irqs(hwq->ctx_cookie);
		fallthrough;
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 * @index:	Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	ulong lock_flags;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
	if (index != PRIMARY_HWQ)
		WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
	hwq->ctx_cookie = NULL;

	spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
	hwq->hrrq_online = false;
	spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	flush_pending_cmds(hwq);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}

/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int k;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts for each master
	 * 2) Unmap the problem state area
	 * 3) Stop each master context
	 */
	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_intr(cfg, UNMAP_THREE, k);

	stop_afu(cfg);

	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_mc(cfg, k);

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
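			/* Back off a bit longer on each pass before re-reading the status */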
			msleep(100 * retry_cnt);
		}
	}
}

/**
 * cxlflash_get_minor() - gets the first available minor number
 *
 * Return: Unique minor number that can be used to create the character device.
 */
static int cxlflash_get_minor(void)
{
	int minor;
	long bit;

	bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
	if (bit >= CXLFLASH_MAX_ADAPTERS)
		return -1;

	minor = bit & MINORMASK;
	set_bit(minor, cxlflash_minor);
	return minor;
}

/**
 * cxlflash_put_minor() - releases the minor number
 * @minor:	Minor number that is no longer needed.
 */
static void cxlflash_put_minor(int minor)
{
	clear_bit(minor, cxlflash_minor);
}

/**
 * cxlflash_release_chrdev() - release the character device for the host
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
{
	device_unregister(cfg->chardev);
	cfg->chardev = NULL;
	cdev_del(&cfg->cdev);
	cxlflash_put_minor(MINOR(cfg->cdev.dev));
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* Yield to running recovery threads before continuing with remove */
	wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
				     cfg->state != STATE_PROBING);
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_CDEV:
		cxlflash_release_chrdev(cfg);
		fallthrough;
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		fallthrough;
	case INIT_STATE_AFU:
		term_afu(cfg);
		fallthrough;
	case INIT_STATE_PCI:
		cfg->ops->destroy_afu(cfg->afu_cookie);
		pci_disable_device(pdev);
		fallthrough;
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
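		/* An all-ones read likely means MMIO is failing; cut the remaining retries */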
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
1147*4882a593Smuzhiyun  * wait_port_offline() - waits for the specified host FC port to go offline
1148*4882a593Smuzhiyun  * @fc_regs:	Top of MMIO region defined for specified port.
1149*4882a593Smuzhiyun  * @delay_us:	Number of microseconds to delay between reading port status.
1150*4882a593Smuzhiyun  * @nretry:	Number of cycles to retry reading port status.
1151*4882a593Smuzhiyun  *
1152*4882a593Smuzhiyun  * The provided MMIO region must be mapped prior to call.
1153*4882a593Smuzhiyun  *
1154*4882a593Smuzhiyun  * Return:
1155*4882a593Smuzhiyun  *	TRUE (1) when the specified port is offline
1156*4882a593Smuzhiyun  *	FALSE (0) when the specified port fails to go offline after timeout
1157*4882a593Smuzhiyun  */
1158*4882a593Smuzhiyun static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
1159*4882a593Smuzhiyun {
1160*4882a593Smuzhiyun 	u64 status;
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	WARN_ON(delay_us < 1000);
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun 	do {
1165*4882a593Smuzhiyun 		msleep(delay_us / 1000);
1166*4882a593Smuzhiyun 		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1167*4882a593Smuzhiyun 		if (status == U64_MAX)
1168*4882a593Smuzhiyun 			nretry /= 2;
1169*4882a593Smuzhiyun 	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
1170*4882a593Smuzhiyun 		 nretry--);
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
1173*4882a593Smuzhiyun }
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun /**
1176*4882a593Smuzhiyun  * afu_set_wwpn() - configures the WWPN for the specified host FC port
1177*4882a593Smuzhiyun  * @afu:	AFU associated with the host that owns the specified FC port.
1178*4882a593Smuzhiyun  * @port:	Port number being configured.
1179*4882a593Smuzhiyun  * @fc_regs:	Top of MMIO region defined for specified port.
1180*4882a593Smuzhiyun  * @wwpn:	The world-wide-port-number previously discovered for port.
1181*4882a593Smuzhiyun  *
1182*4882a593Smuzhiyun  * The provided MMIO region must be mapped prior to call. As part of the
1183*4882a593Smuzhiyun  * sequence to configure the WWPN, the port is toggled offline and then back
1184*4882a593Smuzhiyun  * online. This toggling action can cause this routine to delay up to a few
1185*4882a593Smuzhiyun  * seconds. When configured to use the internal LUN feature of the AFU, a
1186*4882a593Smuzhiyun  * failure to come online is overridden.
1187*4882a593Smuzhiyun  */
1188*4882a593Smuzhiyun static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
1189*4882a593Smuzhiyun 			 u64 wwpn)
1190*4882a593Smuzhiyun {
1191*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = afu->parent;
1192*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	set_port_offline(fc_regs);
1195*4882a593Smuzhiyun 	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1196*4882a593Smuzhiyun 			       FC_PORT_STATUS_RETRY_CNT)) {
1197*4882a593Smuzhiyun 		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
1198*4882a593Smuzhiyun 			__func__, port);
1199*4882a593Smuzhiyun 	}
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	set_port_online(fc_regs);
1204*4882a593Smuzhiyun 	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1205*4882a593Smuzhiyun 			      FC_PORT_STATUS_RETRY_CNT)) {
1206*4882a593Smuzhiyun 		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
1207*4882a593Smuzhiyun 			__func__, port);
1208*4882a593Smuzhiyun 	}
1209*4882a593Smuzhiyun }
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun /**
1212*4882a593Smuzhiyun  * afu_link_reset() - resets the specified host FC port
1213*4882a593Smuzhiyun  * @afu:	AFU associated with the host that owns the specified FC port.
1214*4882a593Smuzhiyun  * @port:	Port number being configured.
1215*4882a593Smuzhiyun  * @fc_regs:	Top of MMIO region defined for specified port.
1216*4882a593Smuzhiyun  *
1217*4882a593Smuzhiyun  * The provided MMIO region must be mapped prior to call. The sequence to
1218*4882a593Smuzhiyun  * reset the port involves toggling it offline and then back online. This
1219*4882a593Smuzhiyun  * action can cause this routine to delay up to a few seconds. An effort
1220*4882a593Smuzhiyun  * is made to maintain the link with the device by switching the host to
1221*4882a593Smuzhiyun  * use the alternate port exclusively while the reset takes place. A
1222*4882a593Smuzhiyun  * failure to come back online is logged but does not abort the sequence.
1223*4882a593Smuzhiyun  */
1224*4882a593Smuzhiyun static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
1225*4882a593Smuzhiyun {
1226*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = afu->parent;
1227*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
1228*4882a593Smuzhiyun 	u64 port_sel;
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 	/* first switch the AFU to the other links, if any */
1231*4882a593Smuzhiyun 	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
1232*4882a593Smuzhiyun 	port_sel &= ~(1ULL << port);
1233*4882a593Smuzhiyun 	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1234*4882a593Smuzhiyun 	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	set_port_offline(fc_regs);
1237*4882a593Smuzhiyun 	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1238*4882a593Smuzhiyun 			       FC_PORT_STATUS_RETRY_CNT))
1239*4882a593Smuzhiyun 		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
1240*4882a593Smuzhiyun 			__func__, port);
1241*4882a593Smuzhiyun 
1242*4882a593Smuzhiyun 	set_port_online(fc_regs);
1243*4882a593Smuzhiyun 	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1244*4882a593Smuzhiyun 			      FC_PORT_STATUS_RETRY_CNT))
1245*4882a593Smuzhiyun 		dev_err(dev, "%s: wait on port %d to go online timed out\n",
1246*4882a593Smuzhiyun 			__func__, port);
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 	/* switch back to include this port */
1249*4882a593Smuzhiyun 	port_sel |= (1ULL << port);
1250*4882a593Smuzhiyun 	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1251*4882a593Smuzhiyun 	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
1254*4882a593Smuzhiyun }
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun /**
1257*4882a593Smuzhiyun  * afu_err_intr_init() - clears and initializes the AFU for error interrupts
1258*4882a593Smuzhiyun  * @afu:	AFU associated with the host.
1259*4882a593Smuzhiyun  */
1260*4882a593Smuzhiyun static void afu_err_intr_init(struct afu *afu)
1261*4882a593Smuzhiyun {
1262*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = afu->parent;
1263*4882a593Smuzhiyun 	__be64 __iomem *fc_port_regs;
1264*4882a593Smuzhiyun 	int i;
1265*4882a593Smuzhiyun 	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
1266*4882a593Smuzhiyun 	u64 reg;
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 	/* Global async interrupts: the AFU clears afu_ctrl on context exit
1269*4882a593Smuzhiyun 	 * if async interrupts were sent to that context. This prevents
1270*4882a593Smuzhiyun 	 * the AFU from sending further async interrupts when there is
1271*4882a593Smuzhiyun 	 * nobody to receive them.
1272*4882a593Smuzhiyun 	 */
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun 	/* mask all */
1276*4882a593Smuzhiyun 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
1277*4882a593Smuzhiyun 	/* set LISN# to send and point to primary master context */
1278*4882a593Smuzhiyun 	reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
1279*4882a593Smuzhiyun 
1280*4882a593Smuzhiyun 	if (afu->internal_lun)
1281*4882a593Smuzhiyun 		reg |= 1;	/* Bit 63 indicates local lun */
1282*4882a593Smuzhiyun 	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
1283*4882a593Smuzhiyun 	/* clear all */
1284*4882a593Smuzhiyun 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1285*4882a593Smuzhiyun 	/* unmask bits that are of interest */
1286*4882a593Smuzhiyun 	/* note: afu can send an interrupt after this step */
1287*4882a593Smuzhiyun 	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
1288*4882a593Smuzhiyun 	/* clear again in case a bit came on after previous clear but before */
1289*4882a593Smuzhiyun 	/* unmask */
1290*4882a593Smuzhiyun 	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun 	/* Clear/Set internal lun bits */
1293*4882a593Smuzhiyun 	fc_port_regs = get_fc_port_regs(cfg, 0);
1294*4882a593Smuzhiyun 	reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
1295*4882a593Smuzhiyun 	reg &= SISL_FC_INTERNAL_MASK;
1296*4882a593Smuzhiyun 	if (afu->internal_lun)
1297*4882a593Smuzhiyun 		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
1298*4882a593Smuzhiyun 	writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 	/* now clear FC errors */
1301*4882a593Smuzhiyun 	for (i = 0; i < cfg->num_fc_ports; i++) {
1302*4882a593Smuzhiyun 		fc_port_regs = get_fc_port_regs(cfg, i);
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun 		writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
1305*4882a593Smuzhiyun 		writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1306*4882a593Smuzhiyun 	}
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	/* sync interrupts for master's IOARRIN write */
1309*4882a593Smuzhiyun 	/* note that unlike asyncs, there can be no pending sync interrupts */
1310*4882a593Smuzhiyun 	/* at this time (this is a fresh context and master has not written */
1311*4882a593Smuzhiyun 	/* IOARRIN yet), so there is nothing to clear. */
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 	/* set LISN#, it is always sent to the context that wrote IOARRIN */
1314*4882a593Smuzhiyun 	for (i = 0; i < afu->num_hwqs; i++) {
1315*4882a593Smuzhiyun 		hwq = get_hwq(afu, i);
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 		reg = readq_be(&hwq->host_map->ctx_ctrl);
1318*4882a593Smuzhiyun 		WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
1319*4882a593Smuzhiyun 		reg |= SISL_MSI_SYNC_ERROR;
1320*4882a593Smuzhiyun 		writeq_be(reg, &hwq->host_map->ctx_ctrl);
1321*4882a593Smuzhiyun 		writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
1322*4882a593Smuzhiyun 	}
1323*4882a593Smuzhiyun }
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun /**
1326*4882a593Smuzhiyun  * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
1327*4882a593Smuzhiyun  * @irq:	Interrupt number.
1328*4882a593Smuzhiyun  * @data:	Private data provided at interrupt registration, the hardware queue.
1329*4882a593Smuzhiyun  *
1330*4882a593Smuzhiyun  * Return: Always return IRQ_HANDLED.
1331*4882a593Smuzhiyun  */
1332*4882a593Smuzhiyun static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
1333*4882a593Smuzhiyun {
1334*4882a593Smuzhiyun 	struct hwq *hwq = (struct hwq *)data;
1335*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = hwq->afu->parent;
1336*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
1337*4882a593Smuzhiyun 	u64 reg;
1338*4882a593Smuzhiyun 	u64 reg_unmasked;
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	reg = readq_be(&hwq->host_map->intr_status);
1341*4882a593Smuzhiyun 	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 	if (reg_unmasked == 0UL) {
1344*4882a593Smuzhiyun 		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
1345*4882a593Smuzhiyun 			__func__, reg);
1346*4882a593Smuzhiyun 		goto cxlflash_sync_err_irq_exit;
1347*4882a593Smuzhiyun 	}
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
1350*4882a593Smuzhiyun 		__func__, reg);
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 	writeq_be(reg_unmasked, &hwq->host_map->intr_clear);
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun cxlflash_sync_err_irq_exit:
1355*4882a593Smuzhiyun 	return IRQ_HANDLED;
1356*4882a593Smuzhiyun }
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun /**
1359*4882a593Smuzhiyun  * process_hrrq() - process the read-response queue
1360*4882a593Smuzhiyun  * @afu:	AFU associated with the host.
1361*4882a593Smuzhiyun  * @hwq:	Hardware queue associated with the host.
1362*4882a593Smuzhiyun  * @budget:	Threshold of RRQ entries to process.
1363*4882a593Smuzhiyun  *
1364*4882a593Smuzhiyun  * This routine must be called holding the disabled RRQ spin lock.
1365*4882a593Smuzhiyun  *
1366*4882a593Smuzhiyun  * Return: The number of entries processed.
1367*4882a593Smuzhiyun  */
1368*4882a593Smuzhiyun static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
1369*4882a593Smuzhiyun {
1370*4882a593Smuzhiyun 	struct afu *afu = hwq->afu;
1371*4882a593Smuzhiyun 	struct afu_cmd *cmd;
1372*4882a593Smuzhiyun 	struct sisl_ioasa *ioasa;
1373*4882a593Smuzhiyun 	struct sisl_ioarcb *ioarcb;
1374*4882a593Smuzhiyun 	bool toggle = hwq->toggle;
1375*4882a593Smuzhiyun 	int num_hrrq = 0;
1376*4882a593Smuzhiyun 	u64 entry,
1377*4882a593Smuzhiyun 	    *hrrq_start = hwq->hrrq_start,
1378*4882a593Smuzhiyun 	    *hrrq_end = hwq->hrrq_end,
1379*4882a593Smuzhiyun 	    *hrrq_curr = hwq->hrrq_curr;
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun 	/* Process ready RRQ entries up to the specified budget (if any) */
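	/*
	 * Each RRQ entry carries a toggle bit (SISL_RESP_HANDLE_T_BIT) that
	 * the AFU flips on every wrap of the queue; an entry is treated as
	 * ready only while its toggle bit matches the host's copy. When the
	 * host wraps hrrq_curr back to hrrq_start it flips its own copy so
	 * that entries left over from the previous pass are ignored.
	 */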
1382*4882a593Smuzhiyun 	while (true) {
1383*4882a593Smuzhiyun 		entry = *hrrq_curr;
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun 		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
1386*4882a593Smuzhiyun 			break;
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 		entry &= ~SISL_RESP_HANDLE_T_BIT;
1389*4882a593Smuzhiyun 
1390*4882a593Smuzhiyun 		if (afu_is_sq_cmd_mode(afu)) {
1391*4882a593Smuzhiyun 			ioasa = (struct sisl_ioasa *)entry;
1392*4882a593Smuzhiyun 			cmd = container_of(ioasa, struct afu_cmd, sa);
1393*4882a593Smuzhiyun 		} else {
1394*4882a593Smuzhiyun 			ioarcb = (struct sisl_ioarcb *)entry;
1395*4882a593Smuzhiyun 			cmd = container_of(ioarcb, struct afu_cmd, rcb);
1396*4882a593Smuzhiyun 		}
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun 		list_add_tail(&cmd->queue, doneq);
1399*4882a593Smuzhiyun 
1400*4882a593Smuzhiyun 		/* Advance to next entry or wrap and flip the toggle bit */
1401*4882a593Smuzhiyun 		if (hrrq_curr < hrrq_end)
1402*4882a593Smuzhiyun 			hrrq_curr++;
1403*4882a593Smuzhiyun 		else {
1404*4882a593Smuzhiyun 			hrrq_curr = hrrq_start;
1405*4882a593Smuzhiyun 			toggle ^= SISL_RESP_HANDLE_T_BIT;
1406*4882a593Smuzhiyun 		}
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 		atomic_inc(&hwq->hsq_credits);
1409*4882a593Smuzhiyun 		num_hrrq++;
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun 		if (budget > 0 && num_hrrq >= budget)
1412*4882a593Smuzhiyun 			break;
1413*4882a593Smuzhiyun 	}
1414*4882a593Smuzhiyun 
1415*4882a593Smuzhiyun 	hwq->hrrq_curr = hrrq_curr;
1416*4882a593Smuzhiyun 	hwq->toggle = toggle;
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun 	return num_hrrq;
1419*4882a593Smuzhiyun }
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun /**
1422*4882a593Smuzhiyun  * process_cmd_doneq() - process a queue of harvested RRQ commands
1423*4882a593Smuzhiyun  * @doneq:	Queue of completed commands.
1424*4882a593Smuzhiyun  *
1425*4882a593Smuzhiyun  * Note that upon return the queue can no longer be trusted.
1426*4882a593Smuzhiyun  */
1427*4882a593Smuzhiyun static void process_cmd_doneq(struct list_head *doneq)
1428*4882a593Smuzhiyun {
1429*4882a593Smuzhiyun 	struct afu_cmd *cmd, *tmp;
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun 	WARN_ON(list_empty(doneq));
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 	list_for_each_entry_safe(cmd, tmp, doneq, queue)
1434*4882a593Smuzhiyun 		cmd_complete(cmd);
1435*4882a593Smuzhiyun }
1436*4882a593Smuzhiyun 
1437*4882a593Smuzhiyun /**
1438*4882a593Smuzhiyun  * cxlflash_irqpoll() - process a queue of harvested RRQ commands
1439*4882a593Smuzhiyun  * @irqpoll:	IRQ poll structure associated with queue to poll.
1440*4882a593Smuzhiyun  * @budget:	Threshold of RRQ entries to process per poll.
1441*4882a593Smuzhiyun  *
1442*4882a593Smuzhiyun  * Return: The number of entries processed.
1443*4882a593Smuzhiyun  */
1444*4882a593Smuzhiyun static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
1445*4882a593Smuzhiyun {
1446*4882a593Smuzhiyun 	struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
1447*4882a593Smuzhiyun 	unsigned long hrrq_flags;
1448*4882a593Smuzhiyun 	LIST_HEAD(doneq);
1449*4882a593Smuzhiyun 	int num_entries = 0;
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun 	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun 	num_entries = process_hrrq(hwq, &doneq, budget);
1454*4882a593Smuzhiyun 	if (num_entries < budget)
1455*4882a593Smuzhiyun 		irq_poll_complete(irqpoll);
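	/*
	 * irq_poll_complete() tells the polling core this instance is done;
	 * when the full budget was consumed the core instead keeps the
	 * instance scheduled and calls back in from softirq context.
	 */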
1456*4882a593Smuzhiyun 
1457*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 	process_cmd_doneq(&doneq);
1460*4882a593Smuzhiyun 	return num_entries;
1461*4882a593Smuzhiyun }
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun /**
1464*4882a593Smuzhiyun  * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
1465*4882a593Smuzhiyun  * @irq:	Interrupt number.
1466*4882a593Smuzhiyun  * @data:	Private data provided at interrupt registration, the hardware queue.
1467*4882a593Smuzhiyun  *
1468*4882a593Smuzhiyun  * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
1469*4882a593Smuzhiyun  */
1470*4882a593Smuzhiyun static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
1471*4882a593Smuzhiyun {
1472*4882a593Smuzhiyun 	struct hwq *hwq = (struct hwq *)data;
1473*4882a593Smuzhiyun 	struct afu *afu = hwq->afu;
1474*4882a593Smuzhiyun 	unsigned long hrrq_flags;
1475*4882a593Smuzhiyun 	LIST_HEAD(doneq);
1476*4882a593Smuzhiyun 	int num_entries = 0;
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun 	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	/* Silently drop spurious interrupts when queue is not online */
1481*4882a593Smuzhiyun 	if (!hwq->hrrq_online) {
1482*4882a593Smuzhiyun 		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1483*4882a593Smuzhiyun 		return IRQ_HANDLED;
1484*4882a593Smuzhiyun 	}
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun 	if (afu_is_irqpoll_enabled(afu)) {
1487*4882a593Smuzhiyun 		irq_poll_sched(&hwq->irqpoll);
1488*4882a593Smuzhiyun 		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1489*4882a593Smuzhiyun 		return IRQ_HANDLED;
1490*4882a593Smuzhiyun 	}
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun 	num_entries = process_hrrq(hwq, &doneq, -1);
1493*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1494*4882a593Smuzhiyun 
1495*4882a593Smuzhiyun 	if (num_entries == 0)
1496*4882a593Smuzhiyun 		return IRQ_NONE;
1497*4882a593Smuzhiyun 
1498*4882a593Smuzhiyun 	process_cmd_doneq(&doneq);
1499*4882a593Smuzhiyun 	return IRQ_HANDLED;
1500*4882a593Smuzhiyun }
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun /*
1503*4882a593Smuzhiyun  * Asynchronous interrupt information table
1504*4882a593Smuzhiyun  *
1505*4882a593Smuzhiyun  * NOTE:
1506*4882a593Smuzhiyun  *	- Order matters here as this array is indexed by bit position.
1507*4882a593Smuzhiyun  *
1508*4882a593Smuzhiyun  *	- The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
1509*4882a593Smuzhiyun  *	  complex and complains about its lack of parentheses/braces.
1510*4882a593Smuzhiyun  */
1511*4882a593Smuzhiyun #define ASTATUS_FC(_a, _b, _c, _d)					 \
1512*4882a593Smuzhiyun 	{ SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun #define BUILD_SISL_ASTATUS_FC_PORT(_a)					 \
1515*4882a593Smuzhiyun 	ASTATUS_FC(_a, LINK_UP, "link up", 0),				 \
1516*4882a593Smuzhiyun 	ASTATUS_FC(_a, LINK_DN, "link down", 0),			 \
1517*4882a593Smuzhiyun 	ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),		 \
1518*4882a593Smuzhiyun 	ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),		 \
1519*4882a593Smuzhiyun 	ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
1520*4882a593Smuzhiyun 	ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),	 \
1521*4882a593Smuzhiyun 	ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),		 \
1522*4882a593Smuzhiyun 	ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
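/*
 * For illustration, ASTATUS_FC(1, LINK_UP, "link up", 0) expands to
 * { SISL_ASTATUS_FC1_LINK_UP, "link up", 1, (0) }, i.e. the initializer
 * order is presumably (status bit, description, port number, action flags)
 * per struct asyc_intr_info.
 */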
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun static const struct asyc_intr_info ainfo[] = {
1525*4882a593Smuzhiyun 	BUILD_SISL_ASTATUS_FC_PORT(1),
1526*4882a593Smuzhiyun 	BUILD_SISL_ASTATUS_FC_PORT(0),
1527*4882a593Smuzhiyun 	BUILD_SISL_ASTATUS_FC_PORT(3),
1528*4882a593Smuzhiyun 	BUILD_SISL_ASTATUS_FC_PORT(2)
1529*4882a593Smuzhiyun };
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun /**
1532*4882a593Smuzhiyun  * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1533*4882a593Smuzhiyun  * @irq:	Interrupt number.
1534*4882a593Smuzhiyun  * @data:	Private data provided at interrupt registration, the hardware queue.
1535*4882a593Smuzhiyun  *
1536*4882a593Smuzhiyun  * Return: Always return IRQ_HANDLED.
1537*4882a593Smuzhiyun  */
1538*4882a593Smuzhiyun static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1539*4882a593Smuzhiyun {
1540*4882a593Smuzhiyun 	struct hwq *hwq = (struct hwq *)data;
1541*4882a593Smuzhiyun 	struct afu *afu = hwq->afu;
1542*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = afu->parent;
1543*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
1544*4882a593Smuzhiyun 	const struct asyc_intr_info *info;
1545*4882a593Smuzhiyun 	struct sisl_global_map __iomem *global = &afu->afu_map->global;
1546*4882a593Smuzhiyun 	__be64 __iomem *fc_port_regs;
1547*4882a593Smuzhiyun 	u64 reg_unmasked;
1548*4882a593Smuzhiyun 	u64 reg;
1549*4882a593Smuzhiyun 	u64 bit;
1550*4882a593Smuzhiyun 	u8 port;
1551*4882a593Smuzhiyun 
1552*4882a593Smuzhiyun 	reg = readq_be(&global->regs.aintr_status);
1553*4882a593Smuzhiyun 	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1554*4882a593Smuzhiyun 
1555*4882a593Smuzhiyun 	if (unlikely(reg_unmasked == 0)) {
1556*4882a593Smuzhiyun 		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
1557*4882a593Smuzhiyun 			__func__, reg);
1558*4882a593Smuzhiyun 		goto out;
1559*4882a593Smuzhiyun 	}
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun 	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
1562*4882a593Smuzhiyun 	writeq_be(reg_unmasked, &global->regs.aintr_clear);
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun 	/* Check each bit that is on */
1565*4882a593Smuzhiyun 	for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
1566*4882a593Smuzhiyun 		if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
1567*4882a593Smuzhiyun 			WARN_ON_ONCE(1);
1568*4882a593Smuzhiyun 			continue;
1569*4882a593Smuzhiyun 		}
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun 		info = &ainfo[bit];
1572*4882a593Smuzhiyun 		if (unlikely(info->status != 1ULL << bit)) {
1573*4882a593Smuzhiyun 			WARN_ON_ONCE(1);
1574*4882a593Smuzhiyun 			continue;
1575*4882a593Smuzhiyun 		}
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun 		port = info->port;
1578*4882a593Smuzhiyun 		fc_port_regs = get_fc_port_regs(cfg, port);
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun 		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
1581*4882a593Smuzhiyun 			__func__, port, info->desc,
1582*4882a593Smuzhiyun 		       readq_be(&fc_port_regs[FC_STATUS / 8]));
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 		/*
1585*4882a593Smuzhiyun 		 * Do link reset first, some OTHER errors will set FC_ERROR
1586*4882a593Smuzhiyun 		 * again if cleared before or w/o a reset
1587*4882a593Smuzhiyun 		 */
1588*4882a593Smuzhiyun 		if (info->action & LINK_RESET) {
1589*4882a593Smuzhiyun 			dev_err(dev, "%s: FC Port %d: resetting link\n",
1590*4882a593Smuzhiyun 				__func__, port);
1591*4882a593Smuzhiyun 			cfg->lr_state = LINK_RESET_REQUIRED;
1592*4882a593Smuzhiyun 			cfg->lr_port = port;
1593*4882a593Smuzhiyun 			schedule_work(&cfg->work_q);
1594*4882a593Smuzhiyun 		}
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun 		if (info->action & CLR_FC_ERROR) {
1597*4882a593Smuzhiyun 			reg = readq_be(&fc_port_regs[FC_ERROR / 8]);
1598*4882a593Smuzhiyun 
1599*4882a593Smuzhiyun 			/*
1600*4882a593Smuzhiyun 			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
1601*4882a593Smuzhiyun 			 * should be the same and tracing one is sufficient.
1602*4882a593Smuzhiyun 			 */
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 			dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
1605*4882a593Smuzhiyun 				__func__, port, reg);
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 			writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
1608*4882a593Smuzhiyun 			writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1609*4882a593Smuzhiyun 		}
1610*4882a593Smuzhiyun 
1611*4882a593Smuzhiyun 		if (info->action & SCAN_HOST) {
1612*4882a593Smuzhiyun 			atomic_inc(&cfg->scan_host_needed);
1613*4882a593Smuzhiyun 			schedule_work(&cfg->work_q);
1614*4882a593Smuzhiyun 		}
1615*4882a593Smuzhiyun 	}
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun out:
1618*4882a593Smuzhiyun 	return IRQ_HANDLED;
1619*4882a593Smuzhiyun }
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun /**
1622*4882a593Smuzhiyun  * read_vpd() - obtains the WWPNs from VPD
1623*4882a593Smuzhiyun  * @cfg:	Internal structure associated with the host.
1624*4882a593Smuzhiyun  * @wwpn:	Array of size MAX_FC_PORTS to pass back WWPNs
1625*4882a593Smuzhiyun  *
1626*4882a593Smuzhiyun  * Return: 0 on success, -errno on failure
1627*4882a593Smuzhiyun  */
1628*4882a593Smuzhiyun static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1629*4882a593Smuzhiyun {
1630*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
1631*4882a593Smuzhiyun 	struct pci_dev *pdev = cfg->dev;
1632*4882a593Smuzhiyun 	int rc = 0;
1633*4882a593Smuzhiyun 	int ro_start, ro_size, i, j, k;
1634*4882a593Smuzhiyun 	ssize_t vpd_size;
1635*4882a593Smuzhiyun 	char vpd_data[CXLFLASH_VPD_LEN];
1636*4882a593Smuzhiyun 	char tmp_buf[WWPN_BUF_LEN] = { 0 };
1637*4882a593Smuzhiyun 	const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
1638*4882a593Smuzhiyun 						cfg->dev_id->driver_data;
1639*4882a593Smuzhiyun 	const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
1640*4882a593Smuzhiyun 	const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
1641*4882a593Smuzhiyun 
1642*4882a593Smuzhiyun 	/* Get the VPD data from the device */
1643*4882a593Smuzhiyun 	vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
1644*4882a593Smuzhiyun 	if (unlikely(vpd_size <= 0)) {
1645*4882a593Smuzhiyun 		dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
1646*4882a593Smuzhiyun 			__func__, vpd_size);
1647*4882a593Smuzhiyun 		rc = -ENODEV;
1648*4882a593Smuzhiyun 		goto out;
1649*4882a593Smuzhiyun 	}
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun 	/* Get the read only section offset */
1652*4882a593Smuzhiyun 	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1653*4882a593Smuzhiyun 				    PCI_VPD_LRDT_RO_DATA);
1654*4882a593Smuzhiyun 	if (unlikely(ro_start < 0)) {
1655*4882a593Smuzhiyun 		dev_err(dev, "%s: VPD Read-only data not found\n", __func__);
1656*4882a593Smuzhiyun 		rc = -ENODEV;
1657*4882a593Smuzhiyun 		goto out;
1658*4882a593Smuzhiyun 	}
1659*4882a593Smuzhiyun 
1660*4882a593Smuzhiyun 	/* Get the read only section size, cap when extends beyond read VPD */
1661*4882a593Smuzhiyun 	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1662*4882a593Smuzhiyun 	j = ro_size;
1663*4882a593Smuzhiyun 	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1664*4882a593Smuzhiyun 	if (unlikely((i + j) > vpd_size)) {
1665*4882a593Smuzhiyun 		dev_dbg(dev, "%s: Might need to read more VPD (%d > %ld)\n",
1666*4882a593Smuzhiyun 			__func__, (i + j), vpd_size);
1667*4882a593Smuzhiyun 		ro_size = vpd_size - i;
1668*4882a593Smuzhiyun 	}
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun 	/*
1671*4882a593Smuzhiyun 	 * Find the offset of the WWPN tag within the read only
1672*4882a593Smuzhiyun 	 * VPD data and validate the found field (partials are
1673*4882a593Smuzhiyun 	 * no good to us). Convert the ASCII data to an integer
1674*4882a593Smuzhiyun 	 * value. Note that we must copy to a temporary buffer
1675*4882a593Smuzhiyun 	 * because the conversion service requires that the ASCII
1676*4882a593Smuzhiyun 	 * string be terminated.
1677*4882a593Smuzhiyun 	 *
1678*4882a593Smuzhiyun 	 * Allow for WWPN not being found for all devices, setting
1679*4882a593Smuzhiyun 	 * the returned WWPN to zero when not found. Notify with a
1680*4882a593Smuzhiyun 	 * log error for cards that should have had WWPN keywords
1681*4882a593Smuzhiyun 	 * in the VPD - cards requiring WWPN will not have their
1682*4882a593Smuzhiyun 	 * ports programmed and operate in an undefined state.
1683*4882a593Smuzhiyun 	 */
1684*4882a593Smuzhiyun 	for (k = 0; k < cfg->num_fc_ports; k++) {
1685*4882a593Smuzhiyun 		j = ro_size;
1686*4882a593Smuzhiyun 		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1687*4882a593Smuzhiyun 
1688*4882a593Smuzhiyun 		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1689*4882a593Smuzhiyun 		if (i < 0) {
1690*4882a593Smuzhiyun 			if (wwpn_vpd_required)
1691*4882a593Smuzhiyun 				dev_err(dev, "%s: Port %d WWPN not found\n",
1692*4882a593Smuzhiyun 					__func__, k);
1693*4882a593Smuzhiyun 			wwpn[k] = 0ULL;
1694*4882a593Smuzhiyun 			continue;
1695*4882a593Smuzhiyun 		}
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 		j = pci_vpd_info_field_size(&vpd_data[i]);
1698*4882a593Smuzhiyun 		i += PCI_VPD_INFO_FLD_HDR_SIZE;
1699*4882a593Smuzhiyun 		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1700*4882a593Smuzhiyun 			dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
1701*4882a593Smuzhiyun 				__func__, k);
1702*4882a593Smuzhiyun 			rc = -ENODEV;
1703*4882a593Smuzhiyun 			goto out;
1704*4882a593Smuzhiyun 		}
1705*4882a593Smuzhiyun 
1706*4882a593Smuzhiyun 		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
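		/*
		 * WWPN_LEN (presumably 16) conveniently doubles as the base
		 * argument below: the keyword holds 16 hex characters, so a
		 * base-16 conversion yields the 64-bit port name.
		 */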
1707*4882a593Smuzhiyun 		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1708*4882a593Smuzhiyun 		if (unlikely(rc)) {
1709*4882a593Smuzhiyun 			dev_err(dev, "%s: WWPN conversion failed for port %d\n",
1710*4882a593Smuzhiyun 				__func__, k);
1711*4882a593Smuzhiyun 			rc = -ENODEV;
1712*4882a593Smuzhiyun 			goto out;
1713*4882a593Smuzhiyun 		}
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun 		dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
1716*4882a593Smuzhiyun 	}
1717*4882a593Smuzhiyun 
1718*4882a593Smuzhiyun out:
1719*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1720*4882a593Smuzhiyun 	return rc;
1721*4882a593Smuzhiyun }
1722*4882a593Smuzhiyun 
1723*4882a593Smuzhiyun /**
1724*4882a593Smuzhiyun  * init_pcr() - initialize the provisioning and control registers
1725*4882a593Smuzhiyun  * @cfg:	Internal structure associated with the host.
1726*4882a593Smuzhiyun  *
1727*4882a593Smuzhiyun  * Also sets up fast access to the mapped registers and initializes AFU
1728*4882a593Smuzhiyun  * command fields that never change.
1729*4882a593Smuzhiyun  */
1730*4882a593Smuzhiyun static void init_pcr(struct cxlflash_cfg *cfg)
1731*4882a593Smuzhiyun {
1732*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
1733*4882a593Smuzhiyun 	struct sisl_ctrl_map __iomem *ctrl_map;
1734*4882a593Smuzhiyun 	struct hwq *hwq;
1735*4882a593Smuzhiyun 	void *cookie;
1736*4882a593Smuzhiyun 	int i;
1737*4882a593Smuzhiyun 
1738*4882a593Smuzhiyun 	for (i = 0; i < MAX_CONTEXT; i++) {
1739*4882a593Smuzhiyun 		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1740*4882a593Smuzhiyun 		/* Disrupt any clients that could be running */
1741*4882a593Smuzhiyun 		/* e.g. clients that survived a master restart */
1742*4882a593Smuzhiyun 		writeq_be(0, &ctrl_map->rht_start);
1743*4882a593Smuzhiyun 		writeq_be(0, &ctrl_map->rht_cnt_id);
1744*4882a593Smuzhiyun 		writeq_be(0, &ctrl_map->ctx_cap);
1745*4882a593Smuzhiyun 	}
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun 	/* Copy frequently used fields into hwq */
1748*4882a593Smuzhiyun 	for (i = 0; i < afu->num_hwqs; i++) {
1749*4882a593Smuzhiyun 		hwq = get_hwq(afu, i);
1750*4882a593Smuzhiyun 		cookie = hwq->ctx_cookie;
1751*4882a593Smuzhiyun 
1752*4882a593Smuzhiyun 		hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
1753*4882a593Smuzhiyun 		hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
1754*4882a593Smuzhiyun 		hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
1755*4882a593Smuzhiyun 
1756*4882a593Smuzhiyun 		/* Program the Endian Control for the master context */
1757*4882a593Smuzhiyun 		writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
1758*4882a593Smuzhiyun 	}
1759*4882a593Smuzhiyun }
1760*4882a593Smuzhiyun 
1761*4882a593Smuzhiyun /**
1762*4882a593Smuzhiyun  * init_global() - initialize AFU global registers
1763*4882a593Smuzhiyun  * @cfg:	Internal structure associated with the host.
1764*4882a593Smuzhiyun  */
1765*4882a593Smuzhiyun static int init_global(struct cxlflash_cfg *cfg)
1766*4882a593Smuzhiyun {
1767*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
1768*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
1769*4882a593Smuzhiyun 	struct hwq *hwq;
1770*4882a593Smuzhiyun 	struct sisl_host_map __iomem *hmap;
1771*4882a593Smuzhiyun 	__be64 __iomem *fc_port_regs;
1772*4882a593Smuzhiyun 	u64 wwpn[MAX_FC_PORTS];	/* wwpn of AFU ports */
1773*4882a593Smuzhiyun 	int i = 0, num_ports = 0;
1774*4882a593Smuzhiyun 	int rc = 0;
1775*4882a593Smuzhiyun 	int j;
1776*4882a593Smuzhiyun 	void *ctx;
1777*4882a593Smuzhiyun 	u64 reg;
1778*4882a593Smuzhiyun 
1779*4882a593Smuzhiyun 	rc = read_vpd(cfg, &wwpn[0]);
1780*4882a593Smuzhiyun 	if (rc) {
1781*4882a593Smuzhiyun 		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
1782*4882a593Smuzhiyun 		goto out;
1783*4882a593Smuzhiyun 	}
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun 	/* Set up RRQ and SQ in HWQ for master issued cmds */
1786*4882a593Smuzhiyun 	for (i = 0; i < afu->num_hwqs; i++) {
1787*4882a593Smuzhiyun 		hwq = get_hwq(afu, i);
1788*4882a593Smuzhiyun 		hmap = hwq->host_map;
1789*4882a593Smuzhiyun 
1790*4882a593Smuzhiyun 		writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
1791*4882a593Smuzhiyun 		writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
1792*4882a593Smuzhiyun 		hwq->hrrq_online = true;
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun 		if (afu_is_sq_cmd_mode(afu)) {
1795*4882a593Smuzhiyun 			writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
1796*4882a593Smuzhiyun 			writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
1797*4882a593Smuzhiyun 		}
1798*4882a593Smuzhiyun 	}
1799*4882a593Smuzhiyun 
1800*4882a593Smuzhiyun 	/* AFU configuration */
1801*4882a593Smuzhiyun 	reg = readq_be(&afu->afu_map->global.regs.afu_config);
1802*4882a593Smuzhiyun 	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
1803*4882a593Smuzhiyun 	/* enable all auto retry options and control endianness */
1804*4882a593Smuzhiyun 	/* leave others at default: */
1805*4882a593Smuzhiyun 	/* CTX_CAP write protected, mbox_r does not clear on read and */
1806*4882a593Smuzhiyun 	/* checker on if dual afu */
1807*4882a593Smuzhiyun 	writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1808*4882a593Smuzhiyun 
1809*4882a593Smuzhiyun 	/* Global port select: select either port */
1810*4882a593Smuzhiyun 	if (afu->internal_lun) {
1811*4882a593Smuzhiyun 		/* Only use port 0 */
1812*4882a593Smuzhiyun 		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1813*4882a593Smuzhiyun 		num_ports = 0;
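		/*
		 * Leaving num_ports at 0 skips the per-port loop below, as
		 * the internal LUN presumably needs no WWPN programming or
		 * FC error threshold setup.
		 */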
1814*4882a593Smuzhiyun 	} else {
1815*4882a593Smuzhiyun 		writeq_be(PORT_MASK(cfg->num_fc_ports),
1816*4882a593Smuzhiyun 			  &afu->afu_map->global.regs.afu_port_sel);
1817*4882a593Smuzhiyun 		num_ports = cfg->num_fc_ports;
1818*4882a593Smuzhiyun 	}
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun 	for (i = 0; i < num_ports; i++) {
1821*4882a593Smuzhiyun 		fc_port_regs = get_fc_port_regs(cfg, i);
1822*4882a593Smuzhiyun 
1823*4882a593Smuzhiyun 		/* Unmask all errors (but they are still masked at AFU) */
1824*4882a593Smuzhiyun 		writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
1825*4882a593Smuzhiyun 		/* Clear CRC error cnt & set a threshold */
1826*4882a593Smuzhiyun 		(void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
1827*4882a593Smuzhiyun 		writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun 		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
1830*4882a593Smuzhiyun 		if (wwpn[i] != 0)
1831*4882a593Smuzhiyun 			afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
1832*4882a593Smuzhiyun 		/* Programming WWPN back to back causes additional
1833*4882a593Smuzhiyun 		 * offline/online transitions and a PLOGI
1834*4882a593Smuzhiyun 		 */
1835*4882a593Smuzhiyun 		msleep(100);
1836*4882a593Smuzhiyun 	}
1837*4882a593Smuzhiyun 
1838*4882a593Smuzhiyun 	if (afu_is_ocxl_lisn(afu)) {
1839*4882a593Smuzhiyun 		/* Set up the LISN effective address for each master */
1840*4882a593Smuzhiyun 		for (i = 0; i < afu->num_hwqs; i++) {
1841*4882a593Smuzhiyun 			hwq = get_hwq(afu, i);
1842*4882a593Smuzhiyun 			ctx = hwq->ctx_cookie;
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 			for (j = 0; j < hwq->num_irqs; j++) {
1845*4882a593Smuzhiyun 				reg = cfg->ops->get_irq_objhndl(ctx, j);
1846*4882a593Smuzhiyun 				writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
1847*4882a593Smuzhiyun 			}
1848*4882a593Smuzhiyun 
1849*4882a593Smuzhiyun 			reg = hwq->ctx_hndl;
1850*4882a593Smuzhiyun 			writeq_be(SISL_LISN_PASID(reg, reg),
1851*4882a593Smuzhiyun 				  &hwq->ctrl_map->lisn_pasid[0]);
1852*4882a593Smuzhiyun 			writeq_be(SISL_LISN_PASID(0UL, reg),
1853*4882a593Smuzhiyun 				  &hwq->ctrl_map->lisn_pasid[1]);
1854*4882a593Smuzhiyun 		}
1855*4882a593Smuzhiyun 	}
1856*4882a593Smuzhiyun 
1857*4882a593Smuzhiyun 	/* Set up master's own CTX_CAP to allow real mode, host translation */
1858*4882a593Smuzhiyun 	/* tables, afu cmds and read/write GSCSI cmds. */
1859*4882a593Smuzhiyun 	/* First, unlock ctx_cap write by reading mbox */
1860*4882a593Smuzhiyun 	for (i = 0; i < afu->num_hwqs; i++) {
1861*4882a593Smuzhiyun 		hwq = get_hwq(afu, i);
1862*4882a593Smuzhiyun 
1863*4882a593Smuzhiyun 		(void)readq_be(&hwq->ctrl_map->mbox_r);	/* unlock ctx_cap */
1864*4882a593Smuzhiyun 		writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1865*4882a593Smuzhiyun 			SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1866*4882a593Smuzhiyun 			SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1867*4882a593Smuzhiyun 			&hwq->ctrl_map->ctx_cap);
1868*4882a593Smuzhiyun 	}
1869*4882a593Smuzhiyun 
1870*4882a593Smuzhiyun 	/*
1871*4882a593Smuzhiyun 	 * Determine write-same unmap support for host by evaluating the unmap
1872*4882a593Smuzhiyun 	 * sector support bit of the context control register associated with
1873*4882a593Smuzhiyun 	 * the primary hardware queue. Note that while this status is reflected
1874*4882a593Smuzhiyun 	 * in a context register, the outcome can be assumed to be host-wide.
1875*4882a593Smuzhiyun 	 */
1876*4882a593Smuzhiyun 	hwq = get_hwq(afu, PRIMARY_HWQ);
1877*4882a593Smuzhiyun 	reg = readq_be(&hwq->host_map->ctx_ctrl);
1878*4882a593Smuzhiyun 	if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
1879*4882a593Smuzhiyun 		cfg->ws_unmap = true;
1880*4882a593Smuzhiyun 
1881*4882a593Smuzhiyun 	/* Initialize heartbeat */
1882*4882a593Smuzhiyun 	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1883*4882a593Smuzhiyun out:
1884*4882a593Smuzhiyun 	return rc;
1885*4882a593Smuzhiyun }
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun /**
1888*4882a593Smuzhiyun  * start_afu() - initializes and starts the AFU
1889*4882a593Smuzhiyun  * @cfg:	Internal structure associated with the host.
1890*4882a593Smuzhiyun  */
1891*4882a593Smuzhiyun static int start_afu(struct cxlflash_cfg *cfg)
1892*4882a593Smuzhiyun {
1893*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
1894*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
1895*4882a593Smuzhiyun 	struct hwq *hwq;
1896*4882a593Smuzhiyun 	int rc = 0;
1897*4882a593Smuzhiyun 	int i;
1898*4882a593Smuzhiyun 
1899*4882a593Smuzhiyun 	init_pcr(cfg);
1900*4882a593Smuzhiyun 
1901*4882a593Smuzhiyun 	/* Initialize each HWQ */
1902*4882a593Smuzhiyun 	for (i = 0; i < afu->num_hwqs; i++) {
1903*4882a593Smuzhiyun 		hwq = get_hwq(afu, i);
1904*4882a593Smuzhiyun 
1905*4882a593Smuzhiyun 		/* After an AFU reset, RRQ entries are stale, clear them */
1906*4882a593Smuzhiyun 		memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
1907*4882a593Smuzhiyun 
1908*4882a593Smuzhiyun 		/* Initialize RRQ pointers */
1909*4882a593Smuzhiyun 		hwq->hrrq_start = &hwq->rrq_entry[0];
1910*4882a593Smuzhiyun 		hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
1911*4882a593Smuzhiyun 		hwq->hrrq_curr = hwq->hrrq_start;
1912*4882a593Smuzhiyun 		hwq->toggle = 1;
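		/*
		 * The toggle starts out set while the freshly zeroed RRQ
		 * entries have the toggle bit clear, so no entry appears
		 * ready until the AFU posts its first completion.
		 */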
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun 		/* Initialize spin locks */
1915*4882a593Smuzhiyun 		spin_lock_init(&hwq->hrrq_slock);
1916*4882a593Smuzhiyun 		spin_lock_init(&hwq->hsq_slock);
1917*4882a593Smuzhiyun 
1918*4882a593Smuzhiyun 		/* Initialize SQ */
1919*4882a593Smuzhiyun 		if (afu_is_sq_cmd_mode(afu)) {
1920*4882a593Smuzhiyun 			memset(&hwq->sq, 0, sizeof(hwq->sq));
1921*4882a593Smuzhiyun 			hwq->hsq_start = &hwq->sq[0];
1922*4882a593Smuzhiyun 			hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
1923*4882a593Smuzhiyun 			hwq->hsq_curr = hwq->hsq_start;
1924*4882a593Smuzhiyun 
1925*4882a593Smuzhiyun 			atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
1926*4882a593Smuzhiyun 		}
1927*4882a593Smuzhiyun 
1928*4882a593Smuzhiyun 		/* Initialize IRQ poll */
1929*4882a593Smuzhiyun 		if (afu_is_irqpoll_enabled(afu))
1930*4882a593Smuzhiyun 			irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
1931*4882a593Smuzhiyun 				      cxlflash_irqpoll);
1932*4882a593Smuzhiyun 
1933*4882a593Smuzhiyun 	}
1934*4882a593Smuzhiyun 
1935*4882a593Smuzhiyun 	rc = init_global(cfg);
1936*4882a593Smuzhiyun 
1937*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1938*4882a593Smuzhiyun 	return rc;
1939*4882a593Smuzhiyun }
1940*4882a593Smuzhiyun 
1941*4882a593Smuzhiyun /**
1942*4882a593Smuzhiyun  * init_intr() - setup interrupt handlers for the master context
1943*4882a593Smuzhiyun  * @cfg:	Internal structure associated with the host.
1944*4882a593Smuzhiyun  * @hwq:	Hardware queue to initialize.
1945*4882a593Smuzhiyun  *
1946*4882a593Smuzhiyun  * Return: UNDO_NOOP (0) on success, level of undo cleanup required on failure
1947*4882a593Smuzhiyun  */
1948*4882a593Smuzhiyun static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1949*4882a593Smuzhiyun 				 struct hwq *hwq)
1950*4882a593Smuzhiyun {
1951*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
1952*4882a593Smuzhiyun 	void *ctx = hwq->ctx_cookie;
1953*4882a593Smuzhiyun 	int rc = 0;
1954*4882a593Smuzhiyun 	enum undo_level level = UNDO_NOOP;
1955*4882a593Smuzhiyun 	bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
1956*4882a593Smuzhiyun 	int num_irqs = hwq->num_irqs;
1957*4882a593Smuzhiyun 
1958*4882a593Smuzhiyun 	rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
1959*4882a593Smuzhiyun 	if (unlikely(rc)) {
1960*4882a593Smuzhiyun 		dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
1961*4882a593Smuzhiyun 			__func__, rc);
1962*4882a593Smuzhiyun 		level = UNDO_NOOP;
1963*4882a593Smuzhiyun 		goto out;
1964*4882a593Smuzhiyun 	}
1965*4882a593Smuzhiyun 
1966*4882a593Smuzhiyun 	rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
1967*4882a593Smuzhiyun 				   "SISL_MSI_SYNC_ERROR");
1968*4882a593Smuzhiyun 	if (unlikely(rc <= 0)) {
1969*4882a593Smuzhiyun 		dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
1970*4882a593Smuzhiyun 		level = FREE_IRQ;
1971*4882a593Smuzhiyun 		goto out;
1972*4882a593Smuzhiyun 	}
1973*4882a593Smuzhiyun 
1974*4882a593Smuzhiyun 	rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
1975*4882a593Smuzhiyun 				   "SISL_MSI_RRQ_UPDATED");
1976*4882a593Smuzhiyun 	if (unlikely(rc <= 0)) {
1977*4882a593Smuzhiyun 		dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
1978*4882a593Smuzhiyun 		level = UNMAP_ONE;
1979*4882a593Smuzhiyun 		goto out;
1980*4882a593Smuzhiyun 	}
1981*4882a593Smuzhiyun 
1982*4882a593Smuzhiyun 	/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
1983*4882a593Smuzhiyun 	if (!is_primary_hwq)
1984*4882a593Smuzhiyun 		goto out;
1985*4882a593Smuzhiyun 
1986*4882a593Smuzhiyun 	rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
1987*4882a593Smuzhiyun 				   "SISL_MSI_ASYNC_ERROR");
1988*4882a593Smuzhiyun 	if (unlikely(rc <= 0)) {
1989*4882a593Smuzhiyun 		dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
1990*4882a593Smuzhiyun 		level = UNMAP_TWO;
1991*4882a593Smuzhiyun 		goto out;
1992*4882a593Smuzhiyun 	}
1993*4882a593Smuzhiyun out:
1994*4882a593Smuzhiyun 	return level;
1995*4882a593Smuzhiyun }
1996*4882a593Smuzhiyun 
1997*4882a593Smuzhiyun /**
1998*4882a593Smuzhiyun  * init_mc() - create and register as the master context
1999*4882a593Smuzhiyun  * @cfg:	Internal structure associated with the host.
2000*4882a593Smuzhiyun  * @index:	HWQ Index of the master context.
2001*4882a593Smuzhiyun  *
2002*4882a593Smuzhiyun  * Return: 0 on success, -errno on failure
2003*4882a593Smuzhiyun  */
2004*4882a593Smuzhiyun static int init_mc(struct cxlflash_cfg *cfg, u32 index)
2005*4882a593Smuzhiyun {
2006*4882a593Smuzhiyun 	void *ctx;
2007*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
2008*4882a593Smuzhiyun 	struct hwq *hwq = get_hwq(cfg->afu, index);
2009*4882a593Smuzhiyun 	int rc = 0;
2010*4882a593Smuzhiyun 	int num_irqs;
2011*4882a593Smuzhiyun 	enum undo_level level;
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	hwq->afu = cfg->afu;
2014*4882a593Smuzhiyun 	hwq->index = index;
2015*4882a593Smuzhiyun 	INIT_LIST_HEAD(&hwq->pending_cmds);
2016*4882a593Smuzhiyun 
2017*4882a593Smuzhiyun 	if (index == PRIMARY_HWQ) {
2018*4882a593Smuzhiyun 		ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
2019*4882a593Smuzhiyun 		num_irqs = 3;
2020*4882a593Smuzhiyun 	} else {
2021*4882a593Smuzhiyun 		ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
2022*4882a593Smuzhiyun 		num_irqs = 2;
2023*4882a593Smuzhiyun 	}
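	/*
	 * Only the primary hardware queue is wired up for the adapter-wide
	 * asynchronous error interrupt (see init_intr()), hence the extra
	 * interrupt requested above.
	 */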
2024*4882a593Smuzhiyun 	if (IS_ERR_OR_NULL(ctx)) {
2025*4882a593Smuzhiyun 		rc = -ENOMEM;
2026*4882a593Smuzhiyun 		goto err1;
2027*4882a593Smuzhiyun 	}
2028*4882a593Smuzhiyun 
2029*4882a593Smuzhiyun 	WARN_ON(hwq->ctx_cookie);
2030*4882a593Smuzhiyun 	hwq->ctx_cookie = ctx;
2031*4882a593Smuzhiyun 	hwq->num_irqs = num_irqs;
2032*4882a593Smuzhiyun 
2033*4882a593Smuzhiyun 	/* Set it up as a master with the CXL */
2034*4882a593Smuzhiyun 	cfg->ops->set_master(ctx);
2035*4882a593Smuzhiyun 
2036*4882a593Smuzhiyun 	/* Reset AFU when initializing primary context */
2037*4882a593Smuzhiyun 	if (index == PRIMARY_HWQ) {
2038*4882a593Smuzhiyun 		rc = cfg->ops->afu_reset(ctx);
2039*4882a593Smuzhiyun 		if (unlikely(rc)) {
2040*4882a593Smuzhiyun 			dev_err(dev, "%s: AFU reset failed rc=%d\n",
2041*4882a593Smuzhiyun 				      __func__, rc);
2042*4882a593Smuzhiyun 			goto err1;
2043*4882a593Smuzhiyun 		}
2044*4882a593Smuzhiyun 	}
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun 	level = init_intr(cfg, hwq);
2047*4882a593Smuzhiyun 	if (unlikely(level)) {
2048*4882a593Smuzhiyun 		dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
2049*4882a593Smuzhiyun 		goto err2;
2050*4882a593Smuzhiyun 	}
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 	/* Finally, activate the context by starting it */
2053*4882a593Smuzhiyun 	rc = cfg->ops->start_context(hwq->ctx_cookie);
2054*4882a593Smuzhiyun 	if (unlikely(rc)) {
2055*4882a593Smuzhiyun 		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
2056*4882a593Smuzhiyun 		level = UNMAP_THREE;
2057*4882a593Smuzhiyun 		goto err2;
2058*4882a593Smuzhiyun 	}
2059*4882a593Smuzhiyun 
2060*4882a593Smuzhiyun out:
2061*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2062*4882a593Smuzhiyun 	return rc;
2063*4882a593Smuzhiyun err2:
2064*4882a593Smuzhiyun 	term_intr(cfg, level, index);
2065*4882a593Smuzhiyun 	if (index != PRIMARY_HWQ)
2066*4882a593Smuzhiyun 		cfg->ops->release_context(ctx);
2067*4882a593Smuzhiyun err1:
2068*4882a593Smuzhiyun 	hwq->ctx_cookie = NULL;
2069*4882a593Smuzhiyun 	goto out;
2070*4882a593Smuzhiyun }
2071*4882a593Smuzhiyun 
2072*4882a593Smuzhiyun /**
2073*4882a593Smuzhiyun  * get_num_afu_ports() - determines and configures the number of AFU ports
2074*4882a593Smuzhiyun  * @cfg:	Internal structure associated with the host.
2075*4882a593Smuzhiyun  *
2076*4882a593Smuzhiyun  * This routine determines the number of AFU ports by converting the global
2077*4882a593Smuzhiyun  * port selection mask. The converted value is only valid following an AFU
2078*4882a593Smuzhiyun  * reset (explicit or power-on). This routine must be invoked shortly after
2079*4882a593Smuzhiyun  * mapping as other routines are dependent on the number of ports during the
2080*4882a593Smuzhiyun  * initialization sequence.
2081*4882a593Smuzhiyun  *
2082*4882a593Smuzhiyun  * To support legacy AFUs that might not have reflected an initial global
2083*4882a593Smuzhiyun  * port mask (value read is 0), default to the number of ports originally
2084*4882a593Smuzhiyun  * supported by the cxlflash driver (2) before hardware with other port
2085*4882a593Smuzhiyun  * offerings was introduced.
2086*4882a593Smuzhiyun  */
2087*4882a593Smuzhiyun static void get_num_afu_ports(struct cxlflash_cfg *cfg)
2088*4882a593Smuzhiyun {
2089*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
2090*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
2091*4882a593Smuzhiyun 	u64 port_mask;
2092*4882a593Smuzhiyun 	int num_fc_ports = LEGACY_FC_PORTS;
2093*4882a593Smuzhiyun 
2094*4882a593Smuzhiyun 	port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
2095*4882a593Smuzhiyun 	if (port_mask != 0ULL)
2096*4882a593Smuzhiyun 		num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
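	/*
	 * For example, port_mask = 0xf gives ilog2(0xf) + 1 = 4 ports and
	 * port_mask = 0x3 gives 2; MAX_FC_PORTS simply caps a mask that
	 * claims more ports than the driver supports.
	 */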
2097*4882a593Smuzhiyun 
2098*4882a593Smuzhiyun 	dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
2099*4882a593Smuzhiyun 		__func__, port_mask, num_fc_ports);
2100*4882a593Smuzhiyun 
2101*4882a593Smuzhiyun 	cfg->num_fc_ports = num_fc_ports;
2102*4882a593Smuzhiyun 	cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
2103*4882a593Smuzhiyun }
2104*4882a593Smuzhiyun 
2105*4882a593Smuzhiyun /**
2106*4882a593Smuzhiyun  * init_afu() - setup as master context and start AFU
2107*4882a593Smuzhiyun  * @cfg:	Internal structure associated with the host.
2108*4882a593Smuzhiyun  *
2109*4882a593Smuzhiyun  * This routine is a higher level of control for configuring the
2110*4882a593Smuzhiyun  * AFU on probe and reset paths.
2111*4882a593Smuzhiyun  *
2112*4882a593Smuzhiyun  * Return: 0 on success, -errno on failure
2113*4882a593Smuzhiyun  */
2114*4882a593Smuzhiyun static int init_afu(struct cxlflash_cfg *cfg)
2115*4882a593Smuzhiyun {
2116*4882a593Smuzhiyun 	u64 reg;
2117*4882a593Smuzhiyun 	int rc = 0;
2118*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
2119*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
2120*4882a593Smuzhiyun 	struct hwq *hwq;
2121*4882a593Smuzhiyun 	int i;
2122*4882a593Smuzhiyun 
2123*4882a593Smuzhiyun 	cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
2124*4882a593Smuzhiyun 
2125*4882a593Smuzhiyun 	mutex_init(&afu->sync_active);
2126*4882a593Smuzhiyun 	afu->num_hwqs = afu->desired_hwqs;
2127*4882a593Smuzhiyun 	for (i = 0; i < afu->num_hwqs; i++) {
2128*4882a593Smuzhiyun 		rc = init_mc(cfg, i);
2129*4882a593Smuzhiyun 		if (rc) {
2130*4882a593Smuzhiyun 			dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
2131*4882a593Smuzhiyun 				__func__, rc, i);
2132*4882a593Smuzhiyun 			goto err1;
2133*4882a593Smuzhiyun 		}
2134*4882a593Smuzhiyun 	}
2135*4882a593Smuzhiyun 
2136*4882a593Smuzhiyun 	/* Map the entire MMIO space of the AFU using the first context */
2137*4882a593Smuzhiyun 	hwq = get_hwq(afu, PRIMARY_HWQ);
2138*4882a593Smuzhiyun 	afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
2139*4882a593Smuzhiyun 	if (!afu->afu_map) {
2140*4882a593Smuzhiyun 		dev_err(dev, "%s: psa_map failed\n", __func__);
2141*4882a593Smuzhiyun 		rc = -ENOMEM;
2142*4882a593Smuzhiyun 		goto err1;
2143*4882a593Smuzhiyun 	}
2144*4882a593Smuzhiyun 
2145*4882a593Smuzhiyun 	/* No byte reverse on reading afu_version or string will be backwards */
2146*4882a593Smuzhiyun 	/* No byte reverse on reading afu_version, otherwise the string will be backwards */
2147*4882a593Smuzhiyun 	memcpy(afu->version, &reg, sizeof(reg));
2148*4882a593Smuzhiyun 	afu->interface_version =
2149*4882a593Smuzhiyun 	    readq_be(&afu->afu_map->global.regs.interface_version);
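	/*
	 * interface_version + 1 == 0 is a compact check for an all-ones
	 * value (~0ULL), which is typically what an MMIO read returns when
	 * the register is not implemented - hence the back-level warning.
	 */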
2150*4882a593Smuzhiyun 	if ((afu->interface_version + 1) == 0) {
2151*4882a593Smuzhiyun 		dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
2152*4882a593Smuzhiyun 			"interface version %016llx\n", afu->version,
2153*4882a593Smuzhiyun 		       afu->interface_version);
2154*4882a593Smuzhiyun 		rc = -EINVAL;
2155*4882a593Smuzhiyun 		goto err1;
2156*4882a593Smuzhiyun 	}
2157*4882a593Smuzhiyun 
2158*4882a593Smuzhiyun 	if (afu_is_sq_cmd_mode(afu)) {
2159*4882a593Smuzhiyun 		afu->send_cmd = send_cmd_sq;
2160*4882a593Smuzhiyun 		afu->context_reset = context_reset_sq;
2161*4882a593Smuzhiyun 	} else {
2162*4882a593Smuzhiyun 		afu->send_cmd = send_cmd_ioarrin;
2163*4882a593Smuzhiyun 		afu->context_reset = context_reset_ioarrin;
2164*4882a593Smuzhiyun 	}
2165*4882a593Smuzhiyun 
2166*4882a593Smuzhiyun 	dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
2167*4882a593Smuzhiyun 		afu->version, afu->interface_version);
2168*4882a593Smuzhiyun 
2169*4882a593Smuzhiyun 	get_num_afu_ports(cfg);
2170*4882a593Smuzhiyun 
2171*4882a593Smuzhiyun 	rc = start_afu(cfg);
2172*4882a593Smuzhiyun 	if (rc) {
2173*4882a593Smuzhiyun 		dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
2174*4882a593Smuzhiyun 		goto err1;
2175*4882a593Smuzhiyun 	}
2176*4882a593Smuzhiyun 
2177*4882a593Smuzhiyun 	afu_err_intr_init(cfg->afu);
2178*4882a593Smuzhiyun 	for (i = 0; i < afu->num_hwqs; i++) {
2179*4882a593Smuzhiyun 		hwq = get_hwq(afu, i);
2180*4882a593Smuzhiyun 
2181*4882a593Smuzhiyun 		hwq->room = readq_be(&hwq->host_map->cmd_room);
2182*4882a593Smuzhiyun 	}
2183*4882a593Smuzhiyun 
2184*4882a593Smuzhiyun 	/* Restore the LUN mappings */
2185*4882a593Smuzhiyun 	cxlflash_restore_luntable(cfg);
2186*4882a593Smuzhiyun out:
2187*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2188*4882a593Smuzhiyun 	return rc;
2189*4882a593Smuzhiyun 
2190*4882a593Smuzhiyun err1:
2191*4882a593Smuzhiyun 	for (i = afu->num_hwqs - 1; i >= 0; i--) {
2192*4882a593Smuzhiyun 		term_intr(cfg, UNMAP_THREE, i);
2193*4882a593Smuzhiyun 		term_mc(cfg, i);
2194*4882a593Smuzhiyun 	}
2195*4882a593Smuzhiyun 	goto out;
2196*4882a593Smuzhiyun }
2197*4882a593Smuzhiyun 
2198*4882a593Smuzhiyun /**
2199*4882a593Smuzhiyun  * afu_reset() - resets the AFU
2200*4882a593Smuzhiyun  * @cfg:	Internal structure associated with the host.
2201*4882a593Smuzhiyun  *
2202*4882a593Smuzhiyun  * Return: 0 on success, -errno on failure
2203*4882a593Smuzhiyun  */
2204*4882a593Smuzhiyun static int afu_reset(struct cxlflash_cfg *cfg)
2205*4882a593Smuzhiyun {
2206*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
2207*4882a593Smuzhiyun 	int rc = 0;
2208*4882a593Smuzhiyun 
2209*4882a593Smuzhiyun 	/* Stop the context before the reset. Since the context is
2210*4882a593Smuzhiyun 	 * no longer available, restart it after the reset is complete.
2211*4882a593Smuzhiyun 	 */
2212*4882a593Smuzhiyun 	term_afu(cfg);
2213*4882a593Smuzhiyun 
2214*4882a593Smuzhiyun 	rc = init_afu(cfg);
2215*4882a593Smuzhiyun 
2216*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2217*4882a593Smuzhiyun 	return rc;
2218*4882a593Smuzhiyun }
2219*4882a593Smuzhiyun 
2220*4882a593Smuzhiyun /**
2221*4882a593Smuzhiyun  * drain_ioctls() - wait until all currently executing ioctls have completed
2222*4882a593Smuzhiyun  * @cfg:	Internal structure associated with the host.
2223*4882a593Smuzhiyun  *
2224*4882a593Smuzhiyun  * Obtain write access to read/write semaphore that wraps ioctl
2225*4882a593Smuzhiyun  * handling to 'drain' ioctls currently executing.
2226*4882a593Smuzhiyun  */
2227*4882a593Smuzhiyun static void drain_ioctls(struct cxlflash_cfg *cfg)
2228*4882a593Smuzhiyun {
2229*4882a593Smuzhiyun 	down_write(&cfg->ioctl_rwsem);
2230*4882a593Smuzhiyun 	up_write(&cfg->ioctl_rwsem);
2231*4882a593Smuzhiyun }
2232*4882a593Smuzhiyun 
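/*
 * Illustrative sketch (editorial, not part of the driver): the drain above
 * works because each ioctl path is assumed to bracket its work with a read
 * lock on the same semaphore. The writer cannot acquire the rwsem until every
 * reader has released it, so the momentary down_write()/up_write() pair only
 * returns once all in-flight ioctls have finished.
 *
 *	static long example_ioctl_body(struct cxlflash_cfg *cfg)
 *	{
 *		long rc;
 *
 *		down_read(&cfg->ioctl_rwsem);	// readers run concurrently
 *		rc = 0;				// ... perform the ioctl work ...
 *		up_read(&cfg->ioctl_rwsem);	// allows drain_ioctls() to return
 *		return rc;
 *	}
 */
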
2233*4882a593Smuzhiyun /**
2234*4882a593Smuzhiyun  * cxlflash_async_reset_host() - asynchronous host reset handler
2235*4882a593Smuzhiyun  * @data:	Private data provided while scheduling reset.
2236*4882a593Smuzhiyun  * @cookie:	Cookie that can be used for checkpointing.
2237*4882a593Smuzhiyun  */
cxlflash_async_reset_host(void * data,async_cookie_t cookie)2238*4882a593Smuzhiyun static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
2239*4882a593Smuzhiyun {
2240*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = data;
2241*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
2242*4882a593Smuzhiyun 	int rc = 0;
2243*4882a593Smuzhiyun 
2244*4882a593Smuzhiyun 	if (cfg->state != STATE_RESET) {
2245*4882a593Smuzhiyun 		dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
2246*4882a593Smuzhiyun 			__func__, cfg->state);
2247*4882a593Smuzhiyun 		goto out;
2248*4882a593Smuzhiyun 	}
2249*4882a593Smuzhiyun 
2250*4882a593Smuzhiyun 	drain_ioctls(cfg);
2251*4882a593Smuzhiyun 	cxlflash_mark_contexts_error(cfg);
2252*4882a593Smuzhiyun 	rc = afu_reset(cfg);
2253*4882a593Smuzhiyun 	if (rc)
2254*4882a593Smuzhiyun 		cfg->state = STATE_FAILTERM;
2255*4882a593Smuzhiyun 	else
2256*4882a593Smuzhiyun 		cfg->state = STATE_NORMAL;
2257*4882a593Smuzhiyun 	wake_up_all(&cfg->reset_waitq);
2258*4882a593Smuzhiyun 
2259*4882a593Smuzhiyun out:
2260*4882a593Smuzhiyun 	scsi_unblock_requests(cfg->host);
2261*4882a593Smuzhiyun }
2262*4882a593Smuzhiyun 
2263*4882a593Smuzhiyun /**
2264*4882a593Smuzhiyun  * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
2265*4882a593Smuzhiyun  * @cfg:	Internal structure associated with the host.
2266*4882a593Smuzhiyun  */
cxlflash_schedule_async_reset(struct cxlflash_cfg * cfg)2267*4882a593Smuzhiyun static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
2268*4882a593Smuzhiyun {
2269*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun 	if (cfg->state != STATE_NORMAL) {
2272*4882a593Smuzhiyun 		dev_dbg(dev, "%s: Not performing reset state=%d\n",
2273*4882a593Smuzhiyun 			__func__, cfg->state);
2274*4882a593Smuzhiyun 		return;
2275*4882a593Smuzhiyun 	}
2276*4882a593Smuzhiyun 
2277*4882a593Smuzhiyun 	cfg->state = STATE_RESET;
2278*4882a593Smuzhiyun 	scsi_block_requests(cfg->host);
2279*4882a593Smuzhiyun 	cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
2280*4882a593Smuzhiyun 						 cfg);
2281*4882a593Smuzhiyun }
2282*4882a593Smuzhiyun 
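/*
 * Illustrative sketch (editorial, not part of the driver): a teardown path
 * that must not race with the scheduled reset could wait on the saved cookie.
 * async_synchronize_cookie() waits for async entries *prior to* the given
 * cookie, hence the +1 so that the reset scheduled above is included.
 *
 *	static void example_wait_for_async_reset(struct cxlflash_cfg *cfg)
 *	{
 *		async_synchronize_cookie(cfg->async_reset_cookie + 1);
 *	}
 */
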
2283*4882a593Smuzhiyun /**
2284*4882a593Smuzhiyun  * send_afu_cmd() - builds and sends an internal AFU command
2285*4882a593Smuzhiyun  * @afu:	AFU associated with the host.
2286*4882a593Smuzhiyun  * @rcb:	Pre-populated IOARCB describing command to send.
2287*4882a593Smuzhiyun  *
2288*4882a593Smuzhiyun  * The AFU can only take one internal AFU command at a time. This limitation is
2289*4882a593Smuzhiyun  * enforced by using a mutex to provide exclusive access to the AFU during the
2290*4882a593Smuzhiyun  * operation. This design point requires calling threads to not be on interrupt
2291*4882a593Smuzhiyun  * operation. This design point requires that calling threads not be in interrupt
2292*4882a593Smuzhiyun  * context, due to the possibility of sleeping during concurrent AFU operations.
2293*4882a593Smuzhiyun  * The command status is optionally passed back to the caller when the caller
2294*4882a593Smuzhiyun  * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
2295*4882a593Smuzhiyun  *
2296*4882a593Smuzhiyun  * Return:
2297*4882a593Smuzhiyun  *	0 on success, -errno on failure
2298*4882a593Smuzhiyun  */
send_afu_cmd(struct afu * afu,struct sisl_ioarcb * rcb)2299*4882a593Smuzhiyun static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
2300*4882a593Smuzhiyun {
2301*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = afu->parent;
2302*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
2303*4882a593Smuzhiyun 	struct afu_cmd *cmd = NULL;
2304*4882a593Smuzhiyun 	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
2305*4882a593Smuzhiyun 	ulong lock_flags;
2306*4882a593Smuzhiyun 	char *buf = NULL;
2307*4882a593Smuzhiyun 	int rc = 0;
2308*4882a593Smuzhiyun 	int nretry = 0;
2309*4882a593Smuzhiyun 
2310*4882a593Smuzhiyun 	if (cfg->state != STATE_NORMAL) {
2311*4882a593Smuzhiyun 		dev_dbg(dev, "%s: Command not sent, state=%u\n",
2312*4882a593Smuzhiyun 			__func__, cfg->state);
2313*4882a593Smuzhiyun 		return 0;
2314*4882a593Smuzhiyun 	}
2315*4882a593Smuzhiyun 
2316*4882a593Smuzhiyun 	mutex_lock(&afu->sync_active);
2317*4882a593Smuzhiyun 	atomic_inc(&afu->cmds_active);
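	/*
	 * Editorial note: the allocation below is padded by (alignment - 1)
	 * bytes so that PTR_ALIGN() can carve a properly aligned struct
	 * afu_cmd out of the buffer; the original pointer is kept in 'buf'
	 * so it can be passed to kfree() on exit.
	 */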
2318*4882a593Smuzhiyun 	buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
2319*4882a593Smuzhiyun 	if (unlikely(!buf)) {
2320*4882a593Smuzhiyun 		dev_err(dev, "%s: no memory for command\n", __func__);
2321*4882a593Smuzhiyun 		rc = -ENOMEM;
2322*4882a593Smuzhiyun 		goto out;
2323*4882a593Smuzhiyun 	}
2324*4882a593Smuzhiyun 
2325*4882a593Smuzhiyun 	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
2326*4882a593Smuzhiyun 
2327*4882a593Smuzhiyun retry:
2328*4882a593Smuzhiyun 	memset(cmd, 0, sizeof(*cmd));
2329*4882a593Smuzhiyun 	memcpy(&cmd->rcb, rcb, sizeof(*rcb));
2330*4882a593Smuzhiyun 	INIT_LIST_HEAD(&cmd->queue);
2331*4882a593Smuzhiyun 	init_completion(&cmd->cevent);
2332*4882a593Smuzhiyun 	cmd->parent = afu;
2333*4882a593Smuzhiyun 	cmd->hwq_index = hwq->index;
2334*4882a593Smuzhiyun 	cmd->rcb.ctx_id = hwq->ctx_hndl;
2335*4882a593Smuzhiyun 
2336*4882a593Smuzhiyun 	dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
2337*4882a593Smuzhiyun 		__func__, afu, cmd, cmd->rcb.cdb[0], nretry);
2338*4882a593Smuzhiyun 
2339*4882a593Smuzhiyun 	rc = afu->send_cmd(afu, cmd);
2340*4882a593Smuzhiyun 	if (unlikely(rc)) {
2341*4882a593Smuzhiyun 		rc = -ENOBUFS;
2342*4882a593Smuzhiyun 		goto out;
2343*4882a593Smuzhiyun 	}
2344*4882a593Smuzhiyun 
2345*4882a593Smuzhiyun 	rc = wait_resp(afu, cmd);
2346*4882a593Smuzhiyun 	switch (rc) {
2347*4882a593Smuzhiyun 	case -ETIMEDOUT:
2348*4882a593Smuzhiyun 		rc = afu->context_reset(hwq);
2349*4882a593Smuzhiyun 		if (rc) {
2350*4882a593Smuzhiyun 			/* Delete the command from pending_cmds list */
2351*4882a593Smuzhiyun 			spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
2352*4882a593Smuzhiyun 			list_del(&cmd->list);
2353*4882a593Smuzhiyun 			spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
2354*4882a593Smuzhiyun 
2355*4882a593Smuzhiyun 			cxlflash_schedule_async_reset(cfg);
2356*4882a593Smuzhiyun 			break;
2357*4882a593Smuzhiyun 		}
2358*4882a593Smuzhiyun 		fallthrough;	/* to retry */
2359*4882a593Smuzhiyun 	case -EAGAIN:
2360*4882a593Smuzhiyun 		if (++nretry < 2)
2361*4882a593Smuzhiyun 			goto retry;
2362*4882a593Smuzhiyun 		fallthrough;	/* to exit */
2363*4882a593Smuzhiyun 	default:
2364*4882a593Smuzhiyun 		break;
2365*4882a593Smuzhiyun 	}
2366*4882a593Smuzhiyun 
2367*4882a593Smuzhiyun 	if (rcb->ioasa)
2368*4882a593Smuzhiyun 		*rcb->ioasa = cmd->sa;
2369*4882a593Smuzhiyun out:
2370*4882a593Smuzhiyun 	atomic_dec(&afu->cmds_active);
2371*4882a593Smuzhiyun 	mutex_unlock(&afu->sync_active);
2372*4882a593Smuzhiyun 	kfree(buf);
2373*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2374*4882a593Smuzhiyun 	return rc;
2375*4882a593Smuzhiyun }
2376*4882a593Smuzhiyun 
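/*
 * Illustrative sketch (editorial, not part of the driver): callers that need
 * the raw completion status point rcb.ioasa at a local IOASA before calling
 * send_afu_cmd(); the status is copied back through that pointer on
 * completion. cxlflash_lun_provision() below uses this exact pattern.
 *
 *	struct sisl_ioarcb rcb = { 0 };
 *	struct sisl_ioasa asa = { 0 };
 *
 *	rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
 *	rcb.msi = SISL_MSI_RRQ_UPDATED;
 *	rcb.ioasa = &asa;
 *	// ... fill rcb.cdb[] with the desired internal AFU command ...
 *	// rc = send_afu_cmd(afu, &rcb);
 *	// afu_rc = asa.rc.afu_rc;
 */
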
2377*4882a593Smuzhiyun /**
2378*4882a593Smuzhiyun  * cxlflash_afu_sync() - builds and sends an AFU sync command
2379*4882a593Smuzhiyun  * @afu:	AFU associated with the host.
2380*4882a593Smuzhiyun  * @ctx:	Identifies context requesting sync.
2381*4882a593Smuzhiyun  * @res:	Identifies resource requesting sync.
2382*4882a593Smuzhiyun  * @mode:	Type of sync to issue (lightweight, heavyweight, global).
2383*4882a593Smuzhiyun  *
2384*4882a593Smuzhiyun  * AFU sync operations are only necessary and allowed when the device is
2385*4882a593Smuzhiyun  * operating normally. When not operating normally, sync requests can occur as
2386*4882a593Smuzhiyun  * part of cleaning up resources associated with an adapter prior to removal.
2387*4882a593Smuzhiyun  * In this scenario, these requests are simply ignored (safe due to the AFU
2388*4882a593Smuzhiyun  * going away).
2389*4882a593Smuzhiyun  *
2390*4882a593Smuzhiyun  * Return:
2391*4882a593Smuzhiyun  *	0 on success, -errno on failure
2392*4882a593Smuzhiyun  */
cxlflash_afu_sync(struct afu * afu,ctx_hndl_t ctx,res_hndl_t res,u8 mode)2393*4882a593Smuzhiyun int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
2394*4882a593Smuzhiyun {
2395*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = afu->parent;
2396*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
2397*4882a593Smuzhiyun 	struct sisl_ioarcb rcb = { 0 };
2398*4882a593Smuzhiyun 
2399*4882a593Smuzhiyun 	dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
2400*4882a593Smuzhiyun 		__func__, afu, ctx, res, mode);
2401*4882a593Smuzhiyun 
2402*4882a593Smuzhiyun 	rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
2403*4882a593Smuzhiyun 	rcb.msi = SISL_MSI_RRQ_UPDATED;
2404*4882a593Smuzhiyun 	rcb.timeout = MC_AFU_SYNC_TIMEOUT;
2405*4882a593Smuzhiyun 
2406*4882a593Smuzhiyun 	rcb.cdb[0] = SISL_AFU_CMD_SYNC;
2407*4882a593Smuzhiyun 	rcb.cdb[1] = mode;
2408*4882a593Smuzhiyun 	put_unaligned_be16(ctx, &rcb.cdb[2]);
2409*4882a593Smuzhiyun 	put_unaligned_be32(res, &rcb.cdb[4]);
2410*4882a593Smuzhiyun 
2411*4882a593Smuzhiyun 	return send_afu_cmd(afu, &rcb);
2412*4882a593Smuzhiyun }
2413*4882a593Smuzhiyun 
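/*
 * Illustrative sketch (editorial, not part of the driver): a caller that has
 * just released a resource handle within a context might issue a lightweight
 * sync so the AFU drops any cached state for that pairing. AFU_LW_SYNC is
 * assumed to be the lightweight mode constant used elsewhere in this driver.
 *
 *	static int example_sync(struct afu *afu, ctx_hndl_t ctxid,
 *				res_hndl_t rhndl)
 *	{
 *		return cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
 *	}
 */
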
2414*4882a593Smuzhiyun /**
2415*4882a593Smuzhiyun  * cxlflash_eh_abort_handler() - abort a SCSI command
2416*4882a593Smuzhiyun  * @scp:	SCSI command to abort.
2417*4882a593Smuzhiyun  *
2418*4882a593Smuzhiyun  * CXL Flash devices do not support a single command abort. Reset the context
2419*4882a593Smuzhiyun  * as per SISLite specification. Flush any pending commands in the hardware
2420*4882a593Smuzhiyun  * queue before the reset.
2421*4882a593Smuzhiyun  *
2422*4882a593Smuzhiyun  * Return: SUCCESS/FAILED as defined in scsi/scsi.h
2423*4882a593Smuzhiyun  */
cxlflash_eh_abort_handler(struct scsi_cmnd * scp)2424*4882a593Smuzhiyun static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
2425*4882a593Smuzhiyun {
2426*4882a593Smuzhiyun 	int rc = FAILED;
2427*4882a593Smuzhiyun 	struct Scsi_Host *host = scp->device->host;
2428*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(host);
2429*4882a593Smuzhiyun 	struct afu_cmd *cmd = sc_to_afuc(scp);
2430*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
2431*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
2432*4882a593Smuzhiyun 	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
2433*4882a593Smuzhiyun 
2434*4882a593Smuzhiyun 	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2435*4882a593Smuzhiyun 		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2436*4882a593Smuzhiyun 		scp->device->channel, scp->device->id, scp->device->lun,
2437*4882a593Smuzhiyun 		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2438*4882a593Smuzhiyun 		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2439*4882a593Smuzhiyun 		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2440*4882a593Smuzhiyun 		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2441*4882a593Smuzhiyun 
2442*4882a593Smuzhiyun 	/* When the state is not normal, another reset/reload is in progress.
2443*4882a593Smuzhiyun 	 * Return failed and the mid-layer will invoke host reset handler.
2444*4882a593Smuzhiyun 	 */
2445*4882a593Smuzhiyun 	if (cfg->state != STATE_NORMAL) {
2446*4882a593Smuzhiyun 		dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
2447*4882a593Smuzhiyun 			__func__, cfg->state);
2448*4882a593Smuzhiyun 		goto out;
2449*4882a593Smuzhiyun 	}
2450*4882a593Smuzhiyun 
2451*4882a593Smuzhiyun 	/* On error keep rc = FAILED; do not return a raw -errno to the midlayer */
2452*4882a593Smuzhiyun 	if (unlikely(afu->context_reset(hwq)))
2453*4882a593Smuzhiyun 		goto out;
2454*4882a593Smuzhiyun 
2455*4882a593Smuzhiyun 	rc = SUCCESS;
2456*4882a593Smuzhiyun 
2457*4882a593Smuzhiyun out:
2458*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2459*4882a593Smuzhiyun 	return rc;
2460*4882a593Smuzhiyun }
2461*4882a593Smuzhiyun 
2462*4882a593Smuzhiyun /**
2463*4882a593Smuzhiyun  * cxlflash_eh_device_reset_handler() - reset a single LUN
2464*4882a593Smuzhiyun  * @scp:	SCSI command to send.
2465*4882a593Smuzhiyun  *
2466*4882a593Smuzhiyun  * Return:
2467*4882a593Smuzhiyun  *	SUCCESS as defined in scsi/scsi.h
2468*4882a593Smuzhiyun  *	FAILED as defined in scsi/scsi.h
2469*4882a593Smuzhiyun  */
cxlflash_eh_device_reset_handler(struct scsi_cmnd * scp)2470*4882a593Smuzhiyun static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
2471*4882a593Smuzhiyun {
2472*4882a593Smuzhiyun 	int rc = SUCCESS;
2473*4882a593Smuzhiyun 	struct scsi_device *sdev = scp->device;
2474*4882a593Smuzhiyun 	struct Scsi_Host *host = sdev->host;
2475*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(host);
2476*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
2477*4882a593Smuzhiyun 	int rcr = 0;
2478*4882a593Smuzhiyun 
2479*4882a593Smuzhiyun 	dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
2480*4882a593Smuzhiyun 		host->host_no, sdev->channel, sdev->id, sdev->lun);
2481*4882a593Smuzhiyun retry:
2482*4882a593Smuzhiyun 	switch (cfg->state) {
2483*4882a593Smuzhiyun 	case STATE_NORMAL:
2484*4882a593Smuzhiyun 		rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
2485*4882a593Smuzhiyun 		if (unlikely(rcr))
2486*4882a593Smuzhiyun 			rc = FAILED;
2487*4882a593Smuzhiyun 		break;
2488*4882a593Smuzhiyun 	case STATE_RESET:
2489*4882a593Smuzhiyun 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2490*4882a593Smuzhiyun 		goto retry;
2491*4882a593Smuzhiyun 	default:
2492*4882a593Smuzhiyun 		rc = FAILED;
2493*4882a593Smuzhiyun 		break;
2494*4882a593Smuzhiyun 	}
2495*4882a593Smuzhiyun 
2496*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2497*4882a593Smuzhiyun 	return rc;
2498*4882a593Smuzhiyun }
2499*4882a593Smuzhiyun 
2500*4882a593Smuzhiyun /**
2501*4882a593Smuzhiyun  * cxlflash_eh_host_reset_handler() - reset the host adapter
2502*4882a593Smuzhiyun  * @scp:	SCSI command from stack identifying host.
2503*4882a593Smuzhiyun  *
2504*4882a593Smuzhiyun  * Following a reset, the state is evaluated again in case an EEH occurred
2505*4882a593Smuzhiyun  * during the reset. In such a scenario, the host reset will either yield
2506*4882a593Smuzhiyun  * until the EEH recovery is complete or return success or failure based
2507*4882a593Smuzhiyun  * upon the current device state.
2508*4882a593Smuzhiyun  *
2509*4882a593Smuzhiyun  * Return:
2510*4882a593Smuzhiyun  *	SUCCESS as defined in scsi/scsi.h
2511*4882a593Smuzhiyun  *	FAILED as defined in scsi/scsi.h
2512*4882a593Smuzhiyun  */
cxlflash_eh_host_reset_handler(struct scsi_cmnd * scp)2513*4882a593Smuzhiyun static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
2514*4882a593Smuzhiyun {
2515*4882a593Smuzhiyun 	int rc = SUCCESS;
2516*4882a593Smuzhiyun 	int rcr = 0;
2517*4882a593Smuzhiyun 	struct Scsi_Host *host = scp->device->host;
2518*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(host);
2519*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
2520*4882a593Smuzhiyun 
2521*4882a593Smuzhiyun 	dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
2522*4882a593Smuzhiyun 
2523*4882a593Smuzhiyun 	switch (cfg->state) {
2524*4882a593Smuzhiyun 	case STATE_NORMAL:
2525*4882a593Smuzhiyun 		cfg->state = STATE_RESET;
2526*4882a593Smuzhiyun 		drain_ioctls(cfg);
2527*4882a593Smuzhiyun 		cxlflash_mark_contexts_error(cfg);
2528*4882a593Smuzhiyun 		rcr = afu_reset(cfg);
2529*4882a593Smuzhiyun 		if (rcr) {
2530*4882a593Smuzhiyun 			rc = FAILED;
2531*4882a593Smuzhiyun 			cfg->state = STATE_FAILTERM;
2532*4882a593Smuzhiyun 		} else
2533*4882a593Smuzhiyun 			cfg->state = STATE_NORMAL;
2534*4882a593Smuzhiyun 		wake_up_all(&cfg->reset_waitq);
2535*4882a593Smuzhiyun 		ssleep(1);
2536*4882a593Smuzhiyun 		fallthrough;
2537*4882a593Smuzhiyun 	case STATE_RESET:
2538*4882a593Smuzhiyun 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2539*4882a593Smuzhiyun 		if (cfg->state == STATE_NORMAL)
2540*4882a593Smuzhiyun 			break;
2541*4882a593Smuzhiyun 		fallthrough;
2542*4882a593Smuzhiyun 	default:
2543*4882a593Smuzhiyun 		rc = FAILED;
2544*4882a593Smuzhiyun 		break;
2545*4882a593Smuzhiyun 	}
2546*4882a593Smuzhiyun 
2547*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2548*4882a593Smuzhiyun 	return rc;
2549*4882a593Smuzhiyun }
2550*4882a593Smuzhiyun 
2551*4882a593Smuzhiyun /**
2552*4882a593Smuzhiyun  * cxlflash_change_queue_depth() - change the queue depth for the device
2553*4882a593Smuzhiyun  * @sdev:	SCSI device destined for queue depth change.
2554*4882a593Smuzhiyun  * @qdepth:	Requested queue depth value to set.
2555*4882a593Smuzhiyun  *
2556*4882a593Smuzhiyun  * The requested queue depth is capped to the maximum supported value.
2557*4882a593Smuzhiyun  *
2558*4882a593Smuzhiyun  * Return: The actual queue depth set.
2559*4882a593Smuzhiyun  */
cxlflash_change_queue_depth(struct scsi_device * sdev,int qdepth)2560*4882a593Smuzhiyun static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
2561*4882a593Smuzhiyun {
2563*4882a593Smuzhiyun 	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
2564*4882a593Smuzhiyun 		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
2565*4882a593Smuzhiyun 
2566*4882a593Smuzhiyun 	scsi_change_queue_depth(sdev, qdepth);
2567*4882a593Smuzhiyun 	return sdev->queue_depth;
2568*4882a593Smuzhiyun }
2569*4882a593Smuzhiyun 
2570*4882a593Smuzhiyun /**
2571*4882a593Smuzhiyun  * cxlflash_show_port_status() - queries and presents the current port status
2572*4882a593Smuzhiyun  * @port:	Desired port for status reporting.
2573*4882a593Smuzhiyun  * @cfg:	Internal structure associated with the host.
2574*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2575*4882a593Smuzhiyun  *
2576*4882a593Smuzhiyun  * Return: The size of the ASCII string returned in @buf or -EINVAL.
2577*4882a593Smuzhiyun  */
cxlflash_show_port_status(u32 port,struct cxlflash_cfg * cfg,char * buf)2578*4882a593Smuzhiyun static ssize_t cxlflash_show_port_status(u32 port,
2579*4882a593Smuzhiyun 					 struct cxlflash_cfg *cfg,
2580*4882a593Smuzhiyun 					 char *buf)
2581*4882a593Smuzhiyun {
2582*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
2583*4882a593Smuzhiyun 	char *disp_status;
2584*4882a593Smuzhiyun 	u64 status;
2585*4882a593Smuzhiyun 	__be64 __iomem *fc_port_regs;
2586*4882a593Smuzhiyun 
2587*4882a593Smuzhiyun 	WARN_ON(port >= MAX_FC_PORTS);
2588*4882a593Smuzhiyun 
2589*4882a593Smuzhiyun 	if (port >= cfg->num_fc_ports) {
2590*4882a593Smuzhiyun 		dev_info(dev, "%s: Port %d not supported on this card.\n",
2591*4882a593Smuzhiyun 			__func__, port);
2592*4882a593Smuzhiyun 		return -EINVAL;
2593*4882a593Smuzhiyun 	}
2594*4882a593Smuzhiyun 
2595*4882a593Smuzhiyun 	fc_port_regs = get_fc_port_regs(cfg, port);
2596*4882a593Smuzhiyun 	status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
2597*4882a593Smuzhiyun 	status &= FC_MTIP_STATUS_MASK;
2598*4882a593Smuzhiyun 
2599*4882a593Smuzhiyun 	if (status == FC_MTIP_STATUS_ONLINE)
2600*4882a593Smuzhiyun 		disp_status = "online";
2601*4882a593Smuzhiyun 	else if (status == FC_MTIP_STATUS_OFFLINE)
2602*4882a593Smuzhiyun 		disp_status = "offline";
2603*4882a593Smuzhiyun 	else
2604*4882a593Smuzhiyun 		disp_status = "unknown";
2605*4882a593Smuzhiyun 
2606*4882a593Smuzhiyun 	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2607*4882a593Smuzhiyun }
2608*4882a593Smuzhiyun 
2609*4882a593Smuzhiyun /**
2610*4882a593Smuzhiyun  * port0_show() - queries and presents the current status of port 0
2611*4882a593Smuzhiyun  * @dev:	Generic device associated with the host owning the port.
2612*4882a593Smuzhiyun  * @attr:	Device attribute representing the port.
2613*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2614*4882a593Smuzhiyun  *
2615*4882a593Smuzhiyun  * Return: The size of the ASCII string returned in @buf.
2616*4882a593Smuzhiyun  */
port0_show(struct device * dev,struct device_attribute * attr,char * buf)2617*4882a593Smuzhiyun static ssize_t port0_show(struct device *dev,
2618*4882a593Smuzhiyun 			  struct device_attribute *attr,
2619*4882a593Smuzhiyun 			  char *buf)
2620*4882a593Smuzhiyun {
2621*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2622*4882a593Smuzhiyun 
2623*4882a593Smuzhiyun 	return cxlflash_show_port_status(0, cfg, buf);
2624*4882a593Smuzhiyun }
2625*4882a593Smuzhiyun 
2626*4882a593Smuzhiyun /**
2627*4882a593Smuzhiyun  * port1_show() - queries and presents the current status of port 1
2628*4882a593Smuzhiyun  * @dev:	Generic device associated with the host owning the port.
2629*4882a593Smuzhiyun  * @attr:	Device attribute representing the port.
2630*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2631*4882a593Smuzhiyun  *
2632*4882a593Smuzhiyun  * Return: The size of the ASCII string returned in @buf.
2633*4882a593Smuzhiyun  */
port1_show(struct device * dev,struct device_attribute * attr,char * buf)2634*4882a593Smuzhiyun static ssize_t port1_show(struct device *dev,
2635*4882a593Smuzhiyun 			  struct device_attribute *attr,
2636*4882a593Smuzhiyun 			  char *buf)
2637*4882a593Smuzhiyun {
2638*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2639*4882a593Smuzhiyun 
2640*4882a593Smuzhiyun 	return cxlflash_show_port_status(1, cfg, buf);
2641*4882a593Smuzhiyun }
2642*4882a593Smuzhiyun 
2643*4882a593Smuzhiyun /**
2644*4882a593Smuzhiyun  * port2_show() - queries and presents the current status of port 2
2645*4882a593Smuzhiyun  * @dev:	Generic device associated with the host owning the port.
2646*4882a593Smuzhiyun  * @attr:	Device attribute representing the port.
2647*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2648*4882a593Smuzhiyun  *
2649*4882a593Smuzhiyun  * Return: The size of the ASCII string returned in @buf.
2650*4882a593Smuzhiyun  */
port2_show(struct device * dev,struct device_attribute * attr,char * buf)2651*4882a593Smuzhiyun static ssize_t port2_show(struct device *dev,
2652*4882a593Smuzhiyun 			  struct device_attribute *attr,
2653*4882a593Smuzhiyun 			  char *buf)
2654*4882a593Smuzhiyun {
2655*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2656*4882a593Smuzhiyun 
2657*4882a593Smuzhiyun 	return cxlflash_show_port_status(2, cfg, buf);
2658*4882a593Smuzhiyun }
2659*4882a593Smuzhiyun 
2660*4882a593Smuzhiyun /**
2661*4882a593Smuzhiyun  * port3_show() - queries and presents the current status of port 3
2662*4882a593Smuzhiyun  * @dev:	Generic device associated with the host owning the port.
2663*4882a593Smuzhiyun  * @attr:	Device attribute representing the port.
2664*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2665*4882a593Smuzhiyun  *
2666*4882a593Smuzhiyun  * Return: The size of the ASCII string returned in @buf.
2667*4882a593Smuzhiyun  */
port3_show(struct device * dev,struct device_attribute * attr,char * buf)2668*4882a593Smuzhiyun static ssize_t port3_show(struct device *dev,
2669*4882a593Smuzhiyun 			  struct device_attribute *attr,
2670*4882a593Smuzhiyun 			  char *buf)
2671*4882a593Smuzhiyun {
2672*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2673*4882a593Smuzhiyun 
2674*4882a593Smuzhiyun 	return cxlflash_show_port_status(3, cfg, buf);
2675*4882a593Smuzhiyun }
2676*4882a593Smuzhiyun 
2677*4882a593Smuzhiyun /**
2678*4882a593Smuzhiyun  * lun_mode_show() - presents the current LUN mode of the host
2679*4882a593Smuzhiyun  * @dev:	Generic device associated with the host.
2680*4882a593Smuzhiyun  * @attr:	Device attribute representing the LUN mode.
2681*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
2682*4882a593Smuzhiyun  *
2683*4882a593Smuzhiyun  * Return: The size of the ASCII string returned in @buf.
2684*4882a593Smuzhiyun  */
lun_mode_show(struct device * dev,struct device_attribute * attr,char * buf)2685*4882a593Smuzhiyun static ssize_t lun_mode_show(struct device *dev,
2686*4882a593Smuzhiyun 			     struct device_attribute *attr, char *buf)
2687*4882a593Smuzhiyun {
2688*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2689*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
2690*4882a593Smuzhiyun 
2691*4882a593Smuzhiyun 	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2692*4882a593Smuzhiyun }
2693*4882a593Smuzhiyun 
2694*4882a593Smuzhiyun /**
2695*4882a593Smuzhiyun  * lun_mode_store() - sets the LUN mode of the host
2696*4882a593Smuzhiyun  * @dev:	Generic device associated with the host.
2697*4882a593Smuzhiyun  * @attr:	Device attribute representing the LUN mode.
2698*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
2699*4882a593Smuzhiyun  * @count:	Length of data residing in @buf.
2700*4882a593Smuzhiyun  *
2701*4882a593Smuzhiyun  * The CXL Flash AFU supports a dummy LUN mode where the external
2702*4882a593Smuzhiyun  * links and storage are not required. Space on the FPGA is used
2703*4882a593Smuzhiyun  * to create 1 or 2 small LUNs which are presented to the system
2704*4882a593Smuzhiyun  * as if they were a normal storage device. This feature is useful
2705*4882a593Smuzhiyun  * during development and also provides manufacturing with a way
2706*4882a593Smuzhiyun  * to test the AFU without an actual device.
2707*4882a593Smuzhiyun  *
2708*4882a593Smuzhiyun  * 0 = external LUN[s] (default)
2709*4882a593Smuzhiyun  * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2710*4882a593Smuzhiyun  * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2711*4882a593Smuzhiyun  * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2712*4882a593Smuzhiyun  * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
2713*4882a593Smuzhiyun  *
2714*4882a593Smuzhiyun  * Return: The number of bytes consumed from @buf (@count).
2715*4882a593Smuzhiyun  */
lun_mode_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2716*4882a593Smuzhiyun static ssize_t lun_mode_store(struct device *dev,
2717*4882a593Smuzhiyun 			      struct device_attribute *attr,
2718*4882a593Smuzhiyun 			      const char *buf, size_t count)
2719*4882a593Smuzhiyun {
2720*4882a593Smuzhiyun 	struct Scsi_Host *shost = class_to_shost(dev);
2721*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(shost);
2722*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
2723*4882a593Smuzhiyun 	int rc;
2724*4882a593Smuzhiyun 	u32 lun_mode;
2725*4882a593Smuzhiyun 
2726*4882a593Smuzhiyun 	rc = kstrtouint(buf, 10, &lun_mode);
2727*4882a593Smuzhiyun 	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2728*4882a593Smuzhiyun 		afu->internal_lun = lun_mode;
2729*4882a593Smuzhiyun 
2730*4882a593Smuzhiyun 		/*
2731*4882a593Smuzhiyun 		 * When configured for internal LUN, there is only one channel,
2732*4882a593Smuzhiyun 		 * channel number 0, else there will be one less than the number
2733*4882a593Smuzhiyun 		 * of fc ports for this card.
2734*4882a593Smuzhiyun 		 */
2735*4882a593Smuzhiyun 		if (afu->internal_lun)
2736*4882a593Smuzhiyun 			shost->max_channel = 0;
2737*4882a593Smuzhiyun 		else
2738*4882a593Smuzhiyun 			shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
2739*4882a593Smuzhiyun 
2740*4882a593Smuzhiyun 		afu_reset(cfg);
2741*4882a593Smuzhiyun 		scsi_scan_host(cfg->host);
2742*4882a593Smuzhiyun 	}
2743*4882a593Smuzhiyun 
2744*4882a593Smuzhiyun 	return count;
2745*4882a593Smuzhiyun }
2746*4882a593Smuzhiyun 
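/*
 * Illustrative sketch (editorial, not part of the driver): from user space the
 * LUN mode is selected by writing one of the values documented above to the
 * host's sysfs attribute. The path below is an assumption based on where SCSI
 * host attributes are normally exposed.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int example_set_lun_mode(const char *attr_path, char mode)
 *	{
 *		// e.g. attr_path = "/sys/class/scsi_host/host0/lun_mode"
 *		int fd = open(attr_path, O_WRONLY);
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return -1;
 *		n = write(fd, &mode, 1);	// mode is '0' through '4'
 *		close(fd);
 *		return (n == 1) ? 0 : -1;
 *	}
 */
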
2747*4882a593Smuzhiyun /**
2748*4882a593Smuzhiyun  * ioctl_version_show() - presents the current ioctl version of the host
2749*4882a593Smuzhiyun  * @dev:	Generic device associated with the host.
2750*4882a593Smuzhiyun  * @attr:	Device attribute representing the ioctl version.
2751*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
2752*4882a593Smuzhiyun  *
2753*4882a593Smuzhiyun  * Return: The size of the ASCII string returned in @buf.
2754*4882a593Smuzhiyun  */
ioctl_version_show(struct device * dev,struct device_attribute * attr,char * buf)2755*4882a593Smuzhiyun static ssize_t ioctl_version_show(struct device *dev,
2756*4882a593Smuzhiyun 				  struct device_attribute *attr, char *buf)
2757*4882a593Smuzhiyun {
2758*4882a593Smuzhiyun 	ssize_t bytes = 0;
2759*4882a593Smuzhiyun 
2760*4882a593Smuzhiyun 	bytes = scnprintf(buf, PAGE_SIZE,
2761*4882a593Smuzhiyun 			  "disk: %u\n", DK_CXLFLASH_VERSION_0);
2762*4882a593Smuzhiyun 	bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2763*4882a593Smuzhiyun 			   "host: %u\n", HT_CXLFLASH_VERSION_0);
2764*4882a593Smuzhiyun 
2765*4882a593Smuzhiyun 	return bytes;
2766*4882a593Smuzhiyun }
2767*4882a593Smuzhiyun 
2768*4882a593Smuzhiyun /**
2769*4882a593Smuzhiyun  * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2770*4882a593Smuzhiyun  * @port:	Desired port for status reporting.
2771*4882a593Smuzhiyun  * @cfg:	Internal structure associated with the host.
2772*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2773*4882a593Smuzhiyun  *
2774*4882a593Smuzhiyun  * Return: The size of the ASCII string returned in @buf or -EINVAL.
2775*4882a593Smuzhiyun  */
cxlflash_show_port_lun_table(u32 port,struct cxlflash_cfg * cfg,char * buf)2776*4882a593Smuzhiyun static ssize_t cxlflash_show_port_lun_table(u32 port,
2777*4882a593Smuzhiyun 					    struct cxlflash_cfg *cfg,
2778*4882a593Smuzhiyun 					    char *buf)
2779*4882a593Smuzhiyun {
2780*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
2781*4882a593Smuzhiyun 	__be64 __iomem *fc_port_luns;
2782*4882a593Smuzhiyun 	int i;
2783*4882a593Smuzhiyun 	ssize_t bytes = 0;
2784*4882a593Smuzhiyun 
2785*4882a593Smuzhiyun 	WARN_ON(port >= MAX_FC_PORTS);
2786*4882a593Smuzhiyun 
2787*4882a593Smuzhiyun 	if (port >= cfg->num_fc_ports) {
2788*4882a593Smuzhiyun 		dev_info(dev, "%s: Port %d not supported on this card.\n",
2789*4882a593Smuzhiyun 			__func__, port);
2790*4882a593Smuzhiyun 		return -EINVAL;
2791*4882a593Smuzhiyun 	}
2792*4882a593Smuzhiyun 
2793*4882a593Smuzhiyun 	fc_port_luns = get_fc_port_luns(cfg, port);
2794*4882a593Smuzhiyun 
2795*4882a593Smuzhiyun 	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2796*4882a593Smuzhiyun 		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2797*4882a593Smuzhiyun 				   "%03d: %016llx\n",
2798*4882a593Smuzhiyun 				   i, readq_be(&fc_port_luns[i]));
2799*4882a593Smuzhiyun 	return bytes;
2800*4882a593Smuzhiyun }
2801*4882a593Smuzhiyun 
2802*4882a593Smuzhiyun /**
2803*4882a593Smuzhiyun  * port0_lun_table_show() - presents the current LUN table of port 0
2804*4882a593Smuzhiyun  * @dev:	Generic device associated with the host owning the port.
2805*4882a593Smuzhiyun  * @attr:	Device attribute representing the port.
2806*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2807*4882a593Smuzhiyun  *
2808*4882a593Smuzhiyun  * Return: The size of the ASCII string returned in @buf.
2809*4882a593Smuzhiyun  */
port0_lun_table_show(struct device * dev,struct device_attribute * attr,char * buf)2810*4882a593Smuzhiyun static ssize_t port0_lun_table_show(struct device *dev,
2811*4882a593Smuzhiyun 				    struct device_attribute *attr,
2812*4882a593Smuzhiyun 				    char *buf)
2813*4882a593Smuzhiyun {
2814*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2815*4882a593Smuzhiyun 
2816*4882a593Smuzhiyun 	return cxlflash_show_port_lun_table(0, cfg, buf);
2817*4882a593Smuzhiyun }
2818*4882a593Smuzhiyun 
2819*4882a593Smuzhiyun /**
2820*4882a593Smuzhiyun  * port1_lun_table_show() - presents the current LUN table of port 1
2821*4882a593Smuzhiyun  * @dev:	Generic device associated with the host owning the port.
2822*4882a593Smuzhiyun  * @attr:	Device attribute representing the port.
2823*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2824*4882a593Smuzhiyun  *
2825*4882a593Smuzhiyun  * Return: The size of the ASCII string returned in @buf.
2826*4882a593Smuzhiyun  */
port1_lun_table_show(struct device * dev,struct device_attribute * attr,char * buf)2827*4882a593Smuzhiyun static ssize_t port1_lun_table_show(struct device *dev,
2828*4882a593Smuzhiyun 				    struct device_attribute *attr,
2829*4882a593Smuzhiyun 				    char *buf)
2830*4882a593Smuzhiyun {
2831*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2832*4882a593Smuzhiyun 
2833*4882a593Smuzhiyun 	return cxlflash_show_port_lun_table(1, cfg, buf);
2834*4882a593Smuzhiyun }
2835*4882a593Smuzhiyun 
2836*4882a593Smuzhiyun /**
2837*4882a593Smuzhiyun  * port2_lun_table_show() - presents the current LUN table of port 2
2838*4882a593Smuzhiyun  * @dev:	Generic device associated with the host owning the port.
2839*4882a593Smuzhiyun  * @attr:	Device attribute representing the port.
2840*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2841*4882a593Smuzhiyun  *
2842*4882a593Smuzhiyun  * Return: The size of the ASCII string returned in @buf.
2843*4882a593Smuzhiyun  */
port2_lun_table_show(struct device * dev,struct device_attribute * attr,char * buf)2844*4882a593Smuzhiyun static ssize_t port2_lun_table_show(struct device *dev,
2845*4882a593Smuzhiyun 				    struct device_attribute *attr,
2846*4882a593Smuzhiyun 				    char *buf)
2847*4882a593Smuzhiyun {
2848*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2849*4882a593Smuzhiyun 
2850*4882a593Smuzhiyun 	return cxlflash_show_port_lun_table(2, cfg, buf);
2851*4882a593Smuzhiyun }
2852*4882a593Smuzhiyun 
2853*4882a593Smuzhiyun /**
2854*4882a593Smuzhiyun  * port3_lun_table_show() - presents the current LUN table of port 3
2855*4882a593Smuzhiyun  * @dev:	Generic device associated with the host owning the port.
2856*4882a593Smuzhiyun  * @attr:	Device attribute representing the port.
2857*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2858*4882a593Smuzhiyun  *
2859*4882a593Smuzhiyun  * Return: The size of the ASCII string returned in @buf.
2860*4882a593Smuzhiyun  */
port3_lun_table_show(struct device * dev,struct device_attribute * attr,char * buf)2861*4882a593Smuzhiyun static ssize_t port3_lun_table_show(struct device *dev,
2862*4882a593Smuzhiyun 				    struct device_attribute *attr,
2863*4882a593Smuzhiyun 				    char *buf)
2864*4882a593Smuzhiyun {
2865*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2866*4882a593Smuzhiyun 
2867*4882a593Smuzhiyun 	return cxlflash_show_port_lun_table(3, cfg, buf);
2868*4882a593Smuzhiyun }
2869*4882a593Smuzhiyun 
2870*4882a593Smuzhiyun /**
2871*4882a593Smuzhiyun  * irqpoll_weight_show() - presents the current IRQ poll weight for the host
2872*4882a593Smuzhiyun  * @dev:	Generic device associated with the host.
2873*4882a593Smuzhiyun  * @attr:	Device attribute representing the IRQ poll weight.
2874*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE to report back the current IRQ poll
2875*4882a593Smuzhiyun  *		weight in ASCII.
2876*4882a593Smuzhiyun  *
2877*4882a593Smuzhiyun  * An IRQ poll weight of 0 indicates polling is disabled.
2878*4882a593Smuzhiyun  *
2879*4882a593Smuzhiyun  * Return: The size of the ASCII string returned in @buf.
2880*4882a593Smuzhiyun  */
irqpoll_weight_show(struct device * dev,struct device_attribute * attr,char * buf)2881*4882a593Smuzhiyun static ssize_t irqpoll_weight_show(struct device *dev,
2882*4882a593Smuzhiyun 				   struct device_attribute *attr, char *buf)
2883*4882a593Smuzhiyun {
2884*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2885*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
2886*4882a593Smuzhiyun 
2887*4882a593Smuzhiyun 	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
2888*4882a593Smuzhiyun }
2889*4882a593Smuzhiyun 
2890*4882a593Smuzhiyun /**
2891*4882a593Smuzhiyun  * irqpoll_weight_store() - sets the current IRQ poll weight for the host
2892*4882a593Smuzhiyun  * @dev:	Generic device associated with the host.
2893*4882a593Smuzhiyun  * @attr:	Device attribute representing the IRQ poll weight.
2894*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE containing the desired IRQ poll
2895*4882a593Smuzhiyun  *		weight in ASCII.
2896*4882a593Smuzhiyun  * @count:	Length of data residing in @buf.
2897*4882a593Smuzhiyun  *
2898*4882a593Smuzhiyun  * An IRQ poll weight of 0 indicates polling is disabled.
2899*4882a593Smuzhiyun  *
2900*4882a593Smuzhiyun  * Return: @count on success, -EINVAL on failure.
2901*4882a593Smuzhiyun  */
irqpoll_weight_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2902*4882a593Smuzhiyun static ssize_t irqpoll_weight_store(struct device *dev,
2903*4882a593Smuzhiyun 				    struct device_attribute *attr,
2904*4882a593Smuzhiyun 				    const char *buf, size_t count)
2905*4882a593Smuzhiyun {
2906*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2907*4882a593Smuzhiyun 	struct device *cfgdev = &cfg->dev->dev;
2908*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
2909*4882a593Smuzhiyun 	struct hwq *hwq;
2910*4882a593Smuzhiyun 	u32 weight;
2911*4882a593Smuzhiyun 	int rc, i;
2912*4882a593Smuzhiyun 
2913*4882a593Smuzhiyun 	rc = kstrtouint(buf, 10, &weight);
2914*4882a593Smuzhiyun 	if (rc)
2915*4882a593Smuzhiyun 		return -EINVAL;
2916*4882a593Smuzhiyun 
2917*4882a593Smuzhiyun 	if (weight > 256) {
2918*4882a593Smuzhiyun 		dev_info(cfgdev,
2919*4882a593Smuzhiyun 			 "Invalid IRQ poll weight. It must be 256 or less.\n");
2920*4882a593Smuzhiyun 		return -EINVAL;
2921*4882a593Smuzhiyun 	}
2922*4882a593Smuzhiyun 
2923*4882a593Smuzhiyun 	if (weight == afu->irqpoll_weight) {
2924*4882a593Smuzhiyun 		dev_info(cfgdev,
2925*4882a593Smuzhiyun 			 "Requested IRQ poll weight matches the current weight.\n");
2926*4882a593Smuzhiyun 		return -EINVAL;
2927*4882a593Smuzhiyun 	}
2928*4882a593Smuzhiyun 
2929*4882a593Smuzhiyun 	if (afu_is_irqpoll_enabled(afu)) {
2930*4882a593Smuzhiyun 		for (i = 0; i < afu->num_hwqs; i++) {
2931*4882a593Smuzhiyun 			hwq = get_hwq(afu, i);
2932*4882a593Smuzhiyun 
2933*4882a593Smuzhiyun 			irq_poll_disable(&hwq->irqpoll);
2934*4882a593Smuzhiyun 		}
2935*4882a593Smuzhiyun 	}
2936*4882a593Smuzhiyun 
2937*4882a593Smuzhiyun 	afu->irqpoll_weight = weight;
2938*4882a593Smuzhiyun 
2939*4882a593Smuzhiyun 	if (weight > 0) {
2940*4882a593Smuzhiyun 		for (i = 0; i < afu->num_hwqs; i++) {
2941*4882a593Smuzhiyun 			hwq = get_hwq(afu, i);
2942*4882a593Smuzhiyun 
2943*4882a593Smuzhiyun 			irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
2944*4882a593Smuzhiyun 		}
2945*4882a593Smuzhiyun 	}
2946*4882a593Smuzhiyun 
2947*4882a593Smuzhiyun 	return count;
2948*4882a593Smuzhiyun }
2949*4882a593Smuzhiyun 
2950*4882a593Smuzhiyun /**
2951*4882a593Smuzhiyun  * num_hwqs_show() - presents the number of hardware queues for the host
2952*4882a593Smuzhiyun  * @dev:	Generic device associated with the host.
2953*4882a593Smuzhiyun  * @attr:	Device attribute representing the number of hardware queues.
2954*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE to report back the number of hardware
2955*4882a593Smuzhiyun  *		queues in ASCII.
2956*4882a593Smuzhiyun  *
2957*4882a593Smuzhiyun  * Return: The size of the ASCII string returned in @buf.
2958*4882a593Smuzhiyun  */
num_hwqs_show(struct device * dev,struct device_attribute * attr,char * buf)2959*4882a593Smuzhiyun static ssize_t num_hwqs_show(struct device *dev,
2960*4882a593Smuzhiyun 			     struct device_attribute *attr, char *buf)
2961*4882a593Smuzhiyun {
2962*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2963*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
2964*4882a593Smuzhiyun 
2965*4882a593Smuzhiyun 	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
2966*4882a593Smuzhiyun }
2967*4882a593Smuzhiyun 
2968*4882a593Smuzhiyun /**
2969*4882a593Smuzhiyun  * num_hwqs_store() - sets the number of hardware queues for the host
2970*4882a593Smuzhiyun  * @dev:	Generic device associated with the host.
2971*4882a593Smuzhiyun  * @attr:	Device attribute representing the number of hardware queues.
2972*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE containing the number of hardware
2973*4882a593Smuzhiyun  *		queues in ASCII.
2974*4882a593Smuzhiyun  * @count:	Length of data residing in @buf.
2975*4882a593Smuzhiyun  *
2976*4882a593Smuzhiyun  * n > 0: num_hwqs = n
2977*4882a593Smuzhiyun  * n = 0: num_hwqs = num_online_cpus()
2978*4882a593Smuzhiyun  * n < 0: num_hwqs = num_online_cpus() / abs(n)
2979*4882a593Smuzhiyun  *
2980*4882a593Smuzhiyun  * Return: @count on success, -EINVAL on failure.
2981*4882a593Smuzhiyun  */
num_hwqs_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)2982*4882a593Smuzhiyun static ssize_t num_hwqs_store(struct device *dev,
2983*4882a593Smuzhiyun 			      struct device_attribute *attr,
2984*4882a593Smuzhiyun 			      const char *buf, size_t count)
2985*4882a593Smuzhiyun {
2986*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2987*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
2988*4882a593Smuzhiyun 	int rc;
2989*4882a593Smuzhiyun 	int nhwqs, num_hwqs;
2990*4882a593Smuzhiyun 
2991*4882a593Smuzhiyun 	rc = kstrtoint(buf, 10, &nhwqs);
2992*4882a593Smuzhiyun 	if (rc)
2993*4882a593Smuzhiyun 		return -EINVAL;
2994*4882a593Smuzhiyun 
2995*4882a593Smuzhiyun 	if (nhwqs >= 1)
2996*4882a593Smuzhiyun 		num_hwqs = nhwqs;
2997*4882a593Smuzhiyun 	else if (nhwqs == 0)
2998*4882a593Smuzhiyun 		num_hwqs = num_online_cpus();
2999*4882a593Smuzhiyun 	else
3000*4882a593Smuzhiyun 		num_hwqs = num_online_cpus() / abs(nhwqs);
3001*4882a593Smuzhiyun 
3002*4882a593Smuzhiyun 	afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
3003*4882a593Smuzhiyun 	WARN_ON_ONCE(afu->desired_hwqs == 0);
3004*4882a593Smuzhiyun 
3005*4882a593Smuzhiyun retry:
3006*4882a593Smuzhiyun 	switch (cfg->state) {
3007*4882a593Smuzhiyun 	case STATE_NORMAL:
3008*4882a593Smuzhiyun 		cfg->state = STATE_RESET;
3009*4882a593Smuzhiyun 		drain_ioctls(cfg);
3010*4882a593Smuzhiyun 		cxlflash_mark_contexts_error(cfg);
3011*4882a593Smuzhiyun 		rc = afu_reset(cfg);
3012*4882a593Smuzhiyun 		if (rc)
3013*4882a593Smuzhiyun 			cfg->state = STATE_FAILTERM;
3014*4882a593Smuzhiyun 		else
3015*4882a593Smuzhiyun 			cfg->state = STATE_NORMAL;
3016*4882a593Smuzhiyun 		wake_up_all(&cfg->reset_waitq);
3017*4882a593Smuzhiyun 		break;
3018*4882a593Smuzhiyun 	case STATE_RESET:
3019*4882a593Smuzhiyun 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
3020*4882a593Smuzhiyun 		if (cfg->state == STATE_NORMAL)
3021*4882a593Smuzhiyun 			goto retry;
3022*4882a593Smuzhiyun 		fallthrough;
3023*4882a593Smuzhiyun 	default:
3024*4882a593Smuzhiyun 		/* Ideally should not happen */
3025*4882a593Smuzhiyun 		dev_err(dev, "%s: Device is not ready, state=%d\n",
3026*4882a593Smuzhiyun 			__func__, cfg->state);
3027*4882a593Smuzhiyun 		break;
3028*4882a593Smuzhiyun 	}
3029*4882a593Smuzhiyun 
3030*4882a593Smuzhiyun 	return count;
3031*4882a593Smuzhiyun }
3032*4882a593Smuzhiyun 
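/*
 * Illustrative sketch (editorial, not part of the driver): the mapping from
 * the value written to num_hwqs to the desired hardware queue count, pulled
 * out of num_hwqs_store() above. With 8 online CPUs: n=3 -> 3, n=0 -> 8,
 * n=-2 -> 4, all capped at CXLFLASH_MAX_HWQS.
 *
 *	static int example_desired_hwqs(int n, int online_cpus, int max_hwqs)
 *	{
 *		int num_hwqs;
 *
 *		if (n >= 1)
 *			num_hwqs = n;
 *		else if (n == 0)
 *			num_hwqs = online_cpus;
 *		else
 *			num_hwqs = online_cpus / abs(n);
 *
 *		return num_hwqs < max_hwqs ? num_hwqs : max_hwqs;
 *	}
 */
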
3033*4882a593Smuzhiyun static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
3034*4882a593Smuzhiyun 
3035*4882a593Smuzhiyun /**
3036*4882a593Smuzhiyun  * hwq_mode_show() - presents the HWQ steering mode for the host
3037*4882a593Smuzhiyun  * @dev:	Generic device associated with the host.
3038*4882a593Smuzhiyun  * @attr:	Device attribute representing the HWQ steering mode.
3039*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE to report back the HWQ steering mode
3040*4882a593Smuzhiyun  *		as a character string.
3041*4882a593Smuzhiyun  *
3042*4882a593Smuzhiyun  * Return: The size of the ASCII string returned in @buf.
3043*4882a593Smuzhiyun  */
hwq_mode_show(struct device * dev,struct device_attribute * attr,char * buf)3044*4882a593Smuzhiyun static ssize_t hwq_mode_show(struct device *dev,
3045*4882a593Smuzhiyun 			     struct device_attribute *attr, char *buf)
3046*4882a593Smuzhiyun {
3047*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
3048*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
3049*4882a593Smuzhiyun 
3050*4882a593Smuzhiyun 	return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
3051*4882a593Smuzhiyun }
3052*4882a593Smuzhiyun 
3053*4882a593Smuzhiyun /**
3054*4882a593Smuzhiyun  * hwq_mode_store() - sets the HWQ steering mode for the host
3055*4882a593Smuzhiyun  * @dev:	Generic device associated with the host.
3056*4882a593Smuzhiyun  * @attr:	Device attribute representing the HWQ steering mode.
3057*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE containing the HWQ steering mode
3058*4882a593Smuzhiyun  *		as a character string.
3059*4882a593Smuzhiyun  * @count:	Length of data residing in @buf.
3060*4882a593Smuzhiyun  *
3061*4882a593Smuzhiyun  * rr = Round-Robin
3062*4882a593Smuzhiyun  * tag = Block MQ Tagging
3063*4882a593Smuzhiyun  * cpu = CPU Affinity
3064*4882a593Smuzhiyun  *
3065*4882a593Smuzhiyun  * Return: @count on success, -EINVAL on failure.
3066*4882a593Smuzhiyun  */
hwq_mode_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)3067*4882a593Smuzhiyun static ssize_t hwq_mode_store(struct device *dev,
3068*4882a593Smuzhiyun 			      struct device_attribute *attr,
3069*4882a593Smuzhiyun 			      const char *buf, size_t count)
3070*4882a593Smuzhiyun {
3071*4882a593Smuzhiyun 	struct Scsi_Host *shost = class_to_shost(dev);
3072*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = shost_priv(shost);
3073*4882a593Smuzhiyun 	struct device *cfgdev = &cfg->dev->dev;
3074*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
3075*4882a593Smuzhiyun 	int i;
3076*4882a593Smuzhiyun 	u32 mode = MAX_HWQ_MODE;
3077*4882a593Smuzhiyun 
3078*4882a593Smuzhiyun 	for (i = 0; i < MAX_HWQ_MODE; i++) {
3079*4882a593Smuzhiyun 		if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
3080*4882a593Smuzhiyun 			mode = i;
3081*4882a593Smuzhiyun 			break;
3082*4882a593Smuzhiyun 		}
3083*4882a593Smuzhiyun 	}
3084*4882a593Smuzhiyun 
3085*4882a593Smuzhiyun 	if (mode >= MAX_HWQ_MODE) {
3086*4882a593Smuzhiyun 		dev_info(cfgdev, "Invalid HWQ steering mode.\n");
3087*4882a593Smuzhiyun 		return -EINVAL;
3088*4882a593Smuzhiyun 	}
3089*4882a593Smuzhiyun 
3090*4882a593Smuzhiyun 	afu->hwq_mode = mode;
3091*4882a593Smuzhiyun 
3092*4882a593Smuzhiyun 	return count;
3093*4882a593Smuzhiyun }
3094*4882a593Smuzhiyun 
3095*4882a593Smuzhiyun /**
3096*4882a593Smuzhiyun  * mode_show() - presents the current mode of the device
3097*4882a593Smuzhiyun  * @dev:	Generic device associated with the device.
3098*4882a593Smuzhiyun  * @attr:	Device attribute representing the device mode.
3099*4882a593Smuzhiyun  * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
3100*4882a593Smuzhiyun  *
3101*4882a593Smuzhiyun  * Return: The size of the ASCII string returned in @buf.
3102*4882a593Smuzhiyun  */
mode_show(struct device * dev,struct device_attribute * attr,char * buf)3103*4882a593Smuzhiyun static ssize_t mode_show(struct device *dev,
3104*4882a593Smuzhiyun 			 struct device_attribute *attr, char *buf)
3105*4882a593Smuzhiyun {
3106*4882a593Smuzhiyun 	struct scsi_device *sdev = to_scsi_device(dev);
3107*4882a593Smuzhiyun 
3108*4882a593Smuzhiyun 	return scnprintf(buf, PAGE_SIZE, "%s\n",
3109*4882a593Smuzhiyun 			 sdev->hostdata ? "superpipe" : "legacy");
3110*4882a593Smuzhiyun }
3111*4882a593Smuzhiyun 
3112*4882a593Smuzhiyun /*
3113*4882a593Smuzhiyun  * Host attributes
3114*4882a593Smuzhiyun  */
3115*4882a593Smuzhiyun static DEVICE_ATTR_RO(port0);
3116*4882a593Smuzhiyun static DEVICE_ATTR_RO(port1);
3117*4882a593Smuzhiyun static DEVICE_ATTR_RO(port2);
3118*4882a593Smuzhiyun static DEVICE_ATTR_RO(port3);
3119*4882a593Smuzhiyun static DEVICE_ATTR_RW(lun_mode);
3120*4882a593Smuzhiyun static DEVICE_ATTR_RO(ioctl_version);
3121*4882a593Smuzhiyun static DEVICE_ATTR_RO(port0_lun_table);
3122*4882a593Smuzhiyun static DEVICE_ATTR_RO(port1_lun_table);
3123*4882a593Smuzhiyun static DEVICE_ATTR_RO(port2_lun_table);
3124*4882a593Smuzhiyun static DEVICE_ATTR_RO(port3_lun_table);
3125*4882a593Smuzhiyun static DEVICE_ATTR_RW(irqpoll_weight);
3126*4882a593Smuzhiyun static DEVICE_ATTR_RW(num_hwqs);
3127*4882a593Smuzhiyun static DEVICE_ATTR_RW(hwq_mode);
3128*4882a593Smuzhiyun 
3129*4882a593Smuzhiyun static struct device_attribute *cxlflash_host_attrs[] = {
3130*4882a593Smuzhiyun 	&dev_attr_port0,
3131*4882a593Smuzhiyun 	&dev_attr_port1,
3132*4882a593Smuzhiyun 	&dev_attr_port2,
3133*4882a593Smuzhiyun 	&dev_attr_port3,
3134*4882a593Smuzhiyun 	&dev_attr_lun_mode,
3135*4882a593Smuzhiyun 	&dev_attr_ioctl_version,
3136*4882a593Smuzhiyun 	&dev_attr_port0_lun_table,
3137*4882a593Smuzhiyun 	&dev_attr_port1_lun_table,
3138*4882a593Smuzhiyun 	&dev_attr_port2_lun_table,
3139*4882a593Smuzhiyun 	&dev_attr_port3_lun_table,
3140*4882a593Smuzhiyun 	&dev_attr_irqpoll_weight,
3141*4882a593Smuzhiyun 	&dev_attr_num_hwqs,
3142*4882a593Smuzhiyun 	&dev_attr_hwq_mode,
3143*4882a593Smuzhiyun 	NULL
3144*4882a593Smuzhiyun };
3145*4882a593Smuzhiyun 
3146*4882a593Smuzhiyun /*
3147*4882a593Smuzhiyun  * Device attributes
3148*4882a593Smuzhiyun  */
3149*4882a593Smuzhiyun static DEVICE_ATTR_RO(mode);
3150*4882a593Smuzhiyun 
3151*4882a593Smuzhiyun static struct device_attribute *cxlflash_dev_attrs[] = {
3152*4882a593Smuzhiyun 	&dev_attr_mode,
3153*4882a593Smuzhiyun 	NULL
3154*4882a593Smuzhiyun };
3155*4882a593Smuzhiyun 
3156*4882a593Smuzhiyun /*
3157*4882a593Smuzhiyun  * Host template
3158*4882a593Smuzhiyun  */
3159*4882a593Smuzhiyun static struct scsi_host_template driver_template = {
3160*4882a593Smuzhiyun 	.module = THIS_MODULE,
3161*4882a593Smuzhiyun 	.name = CXLFLASH_ADAPTER_NAME,
3162*4882a593Smuzhiyun 	.info = cxlflash_driver_info,
3163*4882a593Smuzhiyun 	.ioctl = cxlflash_ioctl,
3164*4882a593Smuzhiyun 	.proc_name = CXLFLASH_NAME,
3165*4882a593Smuzhiyun 	.queuecommand = cxlflash_queuecommand,
3166*4882a593Smuzhiyun 	.eh_abort_handler = cxlflash_eh_abort_handler,
3167*4882a593Smuzhiyun 	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
3168*4882a593Smuzhiyun 	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
3169*4882a593Smuzhiyun 	.change_queue_depth = cxlflash_change_queue_depth,
3170*4882a593Smuzhiyun 	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
3171*4882a593Smuzhiyun 	.can_queue = CXLFLASH_MAX_CMDS,
3172*4882a593Smuzhiyun 	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
3173*4882a593Smuzhiyun 	.this_id = -1,
3174*4882a593Smuzhiyun 	.sg_tablesize = 1,	/* No scatter gather support */
3175*4882a593Smuzhiyun 	.max_sectors = CXLFLASH_MAX_SECTORS,
3176*4882a593Smuzhiyun 	.shost_attrs = cxlflash_host_attrs,
3177*4882a593Smuzhiyun 	.sdev_attrs = cxlflash_dev_attrs,
3178*4882a593Smuzhiyun };
3179*4882a593Smuzhiyun 
3180*4882a593Smuzhiyun /*
3181*4882a593Smuzhiyun  * Device dependent values
3182*4882a593Smuzhiyun  */
3183*4882a593Smuzhiyun static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
3184*4882a593Smuzhiyun 					CXLFLASH_WWPN_VPD_REQUIRED };
3185*4882a593Smuzhiyun static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
3186*4882a593Smuzhiyun 					CXLFLASH_NOTIFY_SHUTDOWN };
3187*4882a593Smuzhiyun static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
3188*4882a593Smuzhiyun 					(CXLFLASH_NOTIFY_SHUTDOWN |
3189*4882a593Smuzhiyun 					CXLFLASH_OCXL_DEV) };
3190*4882a593Smuzhiyun 
3191*4882a593Smuzhiyun /*
3192*4882a593Smuzhiyun  * PCI device binding table
3193*4882a593Smuzhiyun  */
3194*4882a593Smuzhiyun static struct pci_device_id cxlflash_pci_table[] = {
3195*4882a593Smuzhiyun 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
3196*4882a593Smuzhiyun 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
3197*4882a593Smuzhiyun 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
3198*4882a593Smuzhiyun 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
3199*4882a593Smuzhiyun 	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
3200*4882a593Smuzhiyun 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
3201*4882a593Smuzhiyun 	{}
3202*4882a593Smuzhiyun };
3203*4882a593Smuzhiyun 
3204*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
3205*4882a593Smuzhiyun 
3206*4882a593Smuzhiyun /**
3207*4882a593Smuzhiyun  * cxlflash_worker_thread() - work thread handler for the AFU
3208*4882a593Smuzhiyun  * @work:	Work structure contained within cxlflash associated with host.
3209*4882a593Smuzhiyun  *
3210*4882a593Smuzhiyun  * Handles the following events:
3211*4882a593Smuzhiyun  * - Link reset, which cannot be performed in interrupt context because it can
3212*4882a593Smuzhiyun  * block for up to a few seconds
3213*4882a593Smuzhiyun  * - Rescan the host
3214*4882a593Smuzhiyun  */
cxlflash_worker_thread(struct work_struct * work)3215*4882a593Smuzhiyun static void cxlflash_worker_thread(struct work_struct *work)
3216*4882a593Smuzhiyun {
3217*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
3218*4882a593Smuzhiyun 						work_q);
3219*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
3220*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
3221*4882a593Smuzhiyun 	__be64 __iomem *fc_port_regs;
3222*4882a593Smuzhiyun 	int port;
3223*4882a593Smuzhiyun 	ulong lock_flags;
3224*4882a593Smuzhiyun 
3225*4882a593Smuzhiyun 	/* Avoid MMIO if the device has failed */
3226*4882a593Smuzhiyun 
3227*4882a593Smuzhiyun 	if (cfg->state != STATE_NORMAL)
3228*4882a593Smuzhiyun 		return;
3229*4882a593Smuzhiyun 
3230*4882a593Smuzhiyun 	spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3231*4882a593Smuzhiyun 
3232*4882a593Smuzhiyun 	if (cfg->lr_state == LINK_RESET_REQUIRED) {
3233*4882a593Smuzhiyun 		port = cfg->lr_port;
3234*4882a593Smuzhiyun 		if (port < 0)
3235*4882a593Smuzhiyun 			dev_err(dev, "%s: invalid port index %d\n",
3236*4882a593Smuzhiyun 				__func__, port);
3237*4882a593Smuzhiyun 		else {
3238*4882a593Smuzhiyun 			spin_unlock_irqrestore(cfg->host->host_lock,
3239*4882a593Smuzhiyun 					       lock_flags);
3240*4882a593Smuzhiyun 
3241*4882a593Smuzhiyun 			/* The reset can block... */
3242*4882a593Smuzhiyun 			fc_port_regs = get_fc_port_regs(cfg, port);
3243*4882a593Smuzhiyun 			afu_link_reset(afu, port, fc_port_regs);
3244*4882a593Smuzhiyun 			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3245*4882a593Smuzhiyun 		}
3246*4882a593Smuzhiyun 
3247*4882a593Smuzhiyun 		cfg->lr_state = LINK_RESET_COMPLETE;
3248*4882a593Smuzhiyun 	}
3249*4882a593Smuzhiyun 
3250*4882a593Smuzhiyun 	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
3251*4882a593Smuzhiyun 
3252*4882a593Smuzhiyun 	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
3253*4882a593Smuzhiyun 		scsi_scan_host(cfg->host);
3254*4882a593Smuzhiyun }
3255*4882a593Smuzhiyun 
3256*4882a593Smuzhiyun /**
3257*4882a593Smuzhiyun  * cxlflash_chr_open() - character device open handler
3258*4882a593Smuzhiyun  * @inode:	Device inode associated with this character device.
3259*4882a593Smuzhiyun  * @file:	File pointer for this device.
3260*4882a593Smuzhiyun  *
3261*4882a593Smuzhiyun  * Only users with admin privileges are allowed to open the character device.
3262*4882a593Smuzhiyun  *
3263*4882a593Smuzhiyun  * Return: 0 on success, -errno on failure
3264*4882a593Smuzhiyun  */
cxlflash_chr_open(struct inode * inode,struct file * file)3265*4882a593Smuzhiyun static int cxlflash_chr_open(struct inode *inode, struct file *file)
3266*4882a593Smuzhiyun {
3267*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg;
3268*4882a593Smuzhiyun 
3269*4882a593Smuzhiyun 	if (!capable(CAP_SYS_ADMIN))
3270*4882a593Smuzhiyun 		return -EACCES;
3271*4882a593Smuzhiyun 
3272*4882a593Smuzhiyun 	cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
3273*4882a593Smuzhiyun 	file->private_data = cfg;
3274*4882a593Smuzhiyun 
3275*4882a593Smuzhiyun 	return 0;
3276*4882a593Smuzhiyun }
3277*4882a593Smuzhiyun 
3278*4882a593Smuzhiyun /**
3279*4882a593Smuzhiyun  * decode_hioctl() - translates encoded host ioctl to easily identifiable string
3280*4882a593Smuzhiyun  * @cmd:        The host ioctl command to decode.
3281*4882a593Smuzhiyun  *
3282*4882a593Smuzhiyun  * Return: A string identifying the decoded host ioctl.
3283*4882a593Smuzhiyun  */
decode_hioctl(unsigned int cmd)3284*4882a593Smuzhiyun static char *decode_hioctl(unsigned int cmd)
3285*4882a593Smuzhiyun {
3286*4882a593Smuzhiyun 	switch (cmd) {
3287*4882a593Smuzhiyun 	case HT_CXLFLASH_LUN_PROVISION:
3288*4882a593Smuzhiyun 		return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
3289*4882a593Smuzhiyun 	}
3290*4882a593Smuzhiyun 
3291*4882a593Smuzhiyun 	return "UNKNOWN";
3292*4882a593Smuzhiyun }
3293*4882a593Smuzhiyun 
3294*4882a593Smuzhiyun /**
3295*4882a593Smuzhiyun  * cxlflash_lun_provision() - host LUN provisioning handler
3296*4882a593Smuzhiyun  * @cfg:	Internal structure associated with the host.
3297*4882a593Smuzhiyun  * @lunprov:	Kernel copy of userspace ioctl data structure.
3298*4882a593Smuzhiyun  *
3299*4882a593Smuzhiyun  * Return: 0 on success, -errno on failure
3300*4882a593Smuzhiyun  */
3301*4882a593Smuzhiyun static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
3302*4882a593Smuzhiyun 				  struct ht_cxlflash_lun_provision *lunprov)
3303*4882a593Smuzhiyun {
3304*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
3305*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
3306*4882a593Smuzhiyun 	struct sisl_ioarcb rcb;
3307*4882a593Smuzhiyun 	struct sisl_ioasa asa;
3308*4882a593Smuzhiyun 	__be64 __iomem *fc_port_regs;
3309*4882a593Smuzhiyun 	u16 port = lunprov->port;
3310*4882a593Smuzhiyun 	u16 scmd = lunprov->hdr.subcmd;
3311*4882a593Smuzhiyun 	u16 type;
3312*4882a593Smuzhiyun 	u64 reg;
3313*4882a593Smuzhiyun 	u64 size;
3314*4882a593Smuzhiyun 	u64 lun_id;
3315*4882a593Smuzhiyun 	int rc = 0;
3316*4882a593Smuzhiyun 
3317*4882a593Smuzhiyun 	if (!afu_is_lun_provision(afu)) {
3318*4882a593Smuzhiyun 		rc = -ENOTSUPP;
3319*4882a593Smuzhiyun 		goto out;
3320*4882a593Smuzhiyun 	}
3321*4882a593Smuzhiyun 
3322*4882a593Smuzhiyun 	if (port >= cfg->num_fc_ports) {
3323*4882a593Smuzhiyun 		rc = -EINVAL;
3324*4882a593Smuzhiyun 		goto out;
3325*4882a593Smuzhiyun 	}
3326*4882a593Smuzhiyun 
3327*4882a593Smuzhiyun 	switch (scmd) {
3328*4882a593Smuzhiyun 	case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
3329*4882a593Smuzhiyun 		type = SISL_AFU_LUN_PROVISION_CREATE;
3330*4882a593Smuzhiyun 		size = lunprov->size;
3331*4882a593Smuzhiyun 		lun_id = 0;
3332*4882a593Smuzhiyun 		break;
3333*4882a593Smuzhiyun 	case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
3334*4882a593Smuzhiyun 		type = SISL_AFU_LUN_PROVISION_DELETE;
3335*4882a593Smuzhiyun 		size = 0;
3336*4882a593Smuzhiyun 		lun_id = lunprov->lun_id;
3337*4882a593Smuzhiyun 		break;
3338*4882a593Smuzhiyun 	case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
3339*4882a593Smuzhiyun 		fc_port_regs = get_fc_port_regs(cfg, port);
3340*4882a593Smuzhiyun 
3341*4882a593Smuzhiyun 		reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
3342*4882a593Smuzhiyun 		lunprov->max_num_luns = reg;
3343*4882a593Smuzhiyun 		reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
3344*4882a593Smuzhiyun 		lunprov->cur_num_luns = reg;
3345*4882a593Smuzhiyun 		reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
3346*4882a593Smuzhiyun 		lunprov->max_cap_port = reg;
3347*4882a593Smuzhiyun 		reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
3348*4882a593Smuzhiyun 		lunprov->cur_cap_port = reg;
3349*4882a593Smuzhiyun 
3350*4882a593Smuzhiyun 		goto out;
3351*4882a593Smuzhiyun 	default:
3352*4882a593Smuzhiyun 		rc = -EINVAL;
3353*4882a593Smuzhiyun 		goto out;
3354*4882a593Smuzhiyun 	}
3355*4882a593Smuzhiyun 
3356*4882a593Smuzhiyun 	memset(&rcb, 0, sizeof(rcb));
3357*4882a593Smuzhiyun 	memset(&asa, 0, sizeof(asa));
3358*4882a593Smuzhiyun 	rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
3359*4882a593Smuzhiyun 	rcb.lun_id = lun_id;
3360*4882a593Smuzhiyun 	rcb.msi = SISL_MSI_RRQ_UPDATED;
3361*4882a593Smuzhiyun 	rcb.timeout = MC_LUN_PROV_TIMEOUT;
3362*4882a593Smuzhiyun 	rcb.ioasa = &asa;
3363*4882a593Smuzhiyun 
3364*4882a593Smuzhiyun 	rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
3365*4882a593Smuzhiyun 	rcb.cdb[1] = type;
3366*4882a593Smuzhiyun 	rcb.cdb[2] = port;
3367*4882a593Smuzhiyun 	put_unaligned_be64(size, &rcb.cdb[8]);
3368*4882a593Smuzhiyun 
3369*4882a593Smuzhiyun 	rc = send_afu_cmd(afu, &rcb);
3370*4882a593Smuzhiyun 	if (rc) {
3371*4882a593Smuzhiyun 		dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3372*4882a593Smuzhiyun 			__func__, rc, asa.ioasc, asa.afu_extra);
3373*4882a593Smuzhiyun 		goto out;
3374*4882a593Smuzhiyun 	}
3375*4882a593Smuzhiyun 
3376*4882a593Smuzhiyun 	if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
3377*4882a593Smuzhiyun 		lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
3378*4882a593Smuzhiyun 		memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
3379*4882a593Smuzhiyun 	}
3380*4882a593Smuzhiyun out:
3381*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3382*4882a593Smuzhiyun 	return rc;
3383*4882a593Smuzhiyun }
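
/*
 * Usage sketch (illustrative only, assuming the structure layout from
 * uapi/scsi/cxlflash_ioctl.h): a port capacity query from userspace could
 * be issued as below, with the results returned in the same structure:
 *
 *	struct ht_cxlflash_lun_provision lp = { 0 };
 *
 *	lp.hdr.version = HT_CXLFLASH_VERSION_0;
 *	lp.hdr.subcmd  = HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT;
 *	lp.port        = 0;
 *
 *	if (ioctl(fd, HT_CXLFLASH_LUN_PROVISION, &lp) == 0)
 *		printf("LUNs: %llu in use of %llu\n",
 *		       (unsigned long long)lp.cur_num_luns,
 *		       (unsigned long long)lp.max_num_luns);
 */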
3384*4882a593Smuzhiyun 
3385*4882a593Smuzhiyun /**
3386*4882a593Smuzhiyun  * cxlflash_afu_debug() - host AFU debug handler
3387*4882a593Smuzhiyun  * @cfg:	Internal structure associated with the host.
3388*4882a593Smuzhiyun  * @afu_dbg:	Kernel copy of userspace ioctl data structure.
3389*4882a593Smuzhiyun  *
3390*4882a593Smuzhiyun  * For debug requests requiring a data buffer, always provide an aligned
3391*4882a593Smuzhiyun  * (cache line) buffer to the AFU to appease any alignment requirements.
3392*4882a593Smuzhiyun  *
3393*4882a593Smuzhiyun  * Return: 0 on success, -errno on failure
3394*4882a593Smuzhiyun  */
3395*4882a593Smuzhiyun static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
3396*4882a593Smuzhiyun 			      struct ht_cxlflash_afu_debug *afu_dbg)
3397*4882a593Smuzhiyun {
3398*4882a593Smuzhiyun 	struct afu *afu = cfg->afu;
3399*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
3400*4882a593Smuzhiyun 	struct sisl_ioarcb rcb;
3401*4882a593Smuzhiyun 	struct sisl_ioasa asa;
3402*4882a593Smuzhiyun 	char *buf = NULL;
3403*4882a593Smuzhiyun 	char *kbuf = NULL;
3404*4882a593Smuzhiyun 	void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
3405*4882a593Smuzhiyun 	u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
3406*4882a593Smuzhiyun 	u32 ulen = afu_dbg->data_len;
3407*4882a593Smuzhiyun 	bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
3408*4882a593Smuzhiyun 	int rc = 0;
3409*4882a593Smuzhiyun 
3410*4882a593Smuzhiyun 	if (!afu_is_afu_debug(afu)) {
3411*4882a593Smuzhiyun 		rc = -ENOTSUPP;
3412*4882a593Smuzhiyun 		goto out;
3413*4882a593Smuzhiyun 	}
3414*4882a593Smuzhiyun 
3415*4882a593Smuzhiyun 	if (ulen) {
3416*4882a593Smuzhiyun 		req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
3417*4882a593Smuzhiyun 
3418*4882a593Smuzhiyun 		if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
3419*4882a593Smuzhiyun 			rc = -EINVAL;
3420*4882a593Smuzhiyun 			goto out;
3421*4882a593Smuzhiyun 		}
3422*4882a593Smuzhiyun 
3423*4882a593Smuzhiyun 		buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
3424*4882a593Smuzhiyun 		if (unlikely(!buf)) {
3425*4882a593Smuzhiyun 			rc = -ENOMEM;
3426*4882a593Smuzhiyun 			goto out;
3427*4882a593Smuzhiyun 		}
3428*4882a593Smuzhiyun 
3429*4882a593Smuzhiyun 		kbuf = PTR_ALIGN(buf, cache_line_size());
3430*4882a593Smuzhiyun 
3431*4882a593Smuzhiyun 		if (is_write) {
3432*4882a593Smuzhiyun 			req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
3433*4882a593Smuzhiyun 
3434*4882a593Smuzhiyun 			if (copy_from_user(kbuf, ubuf, ulen)) {
3435*4882a593Smuzhiyun 				rc = -EFAULT;
3436*4882a593Smuzhiyun 				goto out;
3437*4882a593Smuzhiyun 			}
3438*4882a593Smuzhiyun 		}
3439*4882a593Smuzhiyun 	}
3440*4882a593Smuzhiyun 
3441*4882a593Smuzhiyun 	memset(&rcb, 0, sizeof(rcb));
3442*4882a593Smuzhiyun 	memset(&asa, 0, sizeof(asa));
3443*4882a593Smuzhiyun 
3444*4882a593Smuzhiyun 	rcb.req_flags = req_flags;
3445*4882a593Smuzhiyun 	rcb.msi = SISL_MSI_RRQ_UPDATED;
3446*4882a593Smuzhiyun 	rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
3447*4882a593Smuzhiyun 	rcb.ioasa = &asa;
3448*4882a593Smuzhiyun 
3449*4882a593Smuzhiyun 	if (ulen) {
3450*4882a593Smuzhiyun 		rcb.data_len = ulen;
3451*4882a593Smuzhiyun 		rcb.data_ea = (uintptr_t)kbuf;
3452*4882a593Smuzhiyun 	}
3453*4882a593Smuzhiyun 
3454*4882a593Smuzhiyun 	rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
3455*4882a593Smuzhiyun 	memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
3456*4882a593Smuzhiyun 	       HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
3457*4882a593Smuzhiyun 
3458*4882a593Smuzhiyun 	rc = send_afu_cmd(afu, &rcb);
3459*4882a593Smuzhiyun 	if (rc) {
3460*4882a593Smuzhiyun 		dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3461*4882a593Smuzhiyun 			__func__, rc, asa.ioasc, asa.afu_extra);
3462*4882a593Smuzhiyun 		goto out;
3463*4882a593Smuzhiyun 	}
3464*4882a593Smuzhiyun 
3465*4882a593Smuzhiyun 	if (ulen && !is_write) {
3466*4882a593Smuzhiyun 		if (copy_to_user(ubuf, kbuf, ulen))
3467*4882a593Smuzhiyun 			rc = -EFAULT;
3468*4882a593Smuzhiyun 	}
3469*4882a593Smuzhiyun out:
3470*4882a593Smuzhiyun 	kfree(buf);
3471*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3472*4882a593Smuzhiyun 	return rc;
3473*4882a593Smuzhiyun }
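
/*
 * Usage sketch (illustrative only): a userspace read of AFU debug data
 * might look like the fragment below; the afu_subcmd bytes are AFU
 * specific and left as placeholders here. A write would additionally set
 * HT_CXLFLASH_HOST_WRITE in hdr.flags, and data_len may not exceed
 * HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN.
 *
 *	struct ht_cxlflash_afu_debug dbg = { 0 };
 *	unsigned char data[4096];
 *
 *	dbg.hdr.version = HT_CXLFLASH_VERSION_0;
 *	dbg.data_ea     = (__u64)(uintptr_t)data;
 *	dbg.data_len    = sizeof(data);
 *
 *	rc = ioctl(fd, HT_CXLFLASH_AFU_DEBUG, &dbg);
 */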
3474*4882a593Smuzhiyun 
3475*4882a593Smuzhiyun /**
3476*4882a593Smuzhiyun  * cxlflash_chr_ioctl() - character device IOCTL handler
3477*4882a593Smuzhiyun  * @file:	File pointer for this device.
3478*4882a593Smuzhiyun  * @cmd:	IOCTL command.
3479*4882a593Smuzhiyun  * @arg:	Userspace ioctl data structure.
3480*4882a593Smuzhiyun  *
3481*4882a593Smuzhiyun  * A read/write semaphore is used to implement a 'drain' of currently
3482*4882a593Smuzhiyun  * running ioctls. The read semaphore is taken at the beginning of each
3483*4882a593Smuzhiyun  * ioctl thread and released upon concluding execution. Additionally the
3484*4882a593Smuzhiyun  * semaphore should be released and then reacquired in any ioctl execution
3485*4882a593Smuzhiyun  * path which will wait for an event to occur that is outside the scope of
3486*4882a593Smuzhiyun  * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
3487*4882a593Smuzhiyun  * a thread simply needs to acquire the write semaphore.
3488*4882a593Smuzhiyun  *
3489*4882a593Smuzhiyun  * Return: 0 on success, -errno on failure
3490*4882a593Smuzhiyun  */
3491*4882a593Smuzhiyun static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
3492*4882a593Smuzhiyun 			       unsigned long arg)
3493*4882a593Smuzhiyun {
3494*4882a593Smuzhiyun 	typedef int (*hioctl) (struct cxlflash_cfg *, void *);
3495*4882a593Smuzhiyun 
3496*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = file->private_data;
3497*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
3498*4882a593Smuzhiyun 	char buf[sizeof(union cxlflash_ht_ioctls)];
3499*4882a593Smuzhiyun 	void __user *uarg = (void __user *)arg;
3500*4882a593Smuzhiyun 	struct ht_cxlflash_hdr *hdr;
3501*4882a593Smuzhiyun 	size_t size = 0;
3502*4882a593Smuzhiyun 	bool known_ioctl = false;
3503*4882a593Smuzhiyun 	int idx = 0;
3504*4882a593Smuzhiyun 	int rc = 0;
3505*4882a593Smuzhiyun 	hioctl do_ioctl = NULL;
3506*4882a593Smuzhiyun 
3507*4882a593Smuzhiyun 	static const struct {
3508*4882a593Smuzhiyun 		size_t size;
3509*4882a593Smuzhiyun 		hioctl ioctl;
3510*4882a593Smuzhiyun 	} ioctl_tbl[] = {	/* NOTE: order matters here */
3511*4882a593Smuzhiyun 	{ sizeof(struct ht_cxlflash_lun_provision),
3512*4882a593Smuzhiyun 		(hioctl)cxlflash_lun_provision },
3513*4882a593Smuzhiyun 	{ sizeof(struct ht_cxlflash_afu_debug),
3514*4882a593Smuzhiyun 		(hioctl)cxlflash_afu_debug },
3515*4882a593Smuzhiyun 	};
3516*4882a593Smuzhiyun 
3517*4882a593Smuzhiyun 	/* Hold read semaphore so we can drain if needed */
3518*4882a593Smuzhiyun 	down_read(&cfg->ioctl_rwsem);
3519*4882a593Smuzhiyun 
3520*4882a593Smuzhiyun 	dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
3521*4882a593Smuzhiyun 		__func__, cmd, idx, sizeof(ioctl_tbl));
3522*4882a593Smuzhiyun 
3523*4882a593Smuzhiyun 	switch (cmd) {
3524*4882a593Smuzhiyun 	case HT_CXLFLASH_LUN_PROVISION:
3525*4882a593Smuzhiyun 	case HT_CXLFLASH_AFU_DEBUG:
3526*4882a593Smuzhiyun 		known_ioctl = true;
3527*4882a593Smuzhiyun 		idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
3528*4882a593Smuzhiyun 		size = ioctl_tbl[idx].size;
3529*4882a593Smuzhiyun 		do_ioctl = ioctl_tbl[idx].ioctl;
3530*4882a593Smuzhiyun 
3531*4882a593Smuzhiyun 		if (likely(do_ioctl))
3532*4882a593Smuzhiyun 			break;
3533*4882a593Smuzhiyun 
3534*4882a593Smuzhiyun 		fallthrough;
3535*4882a593Smuzhiyun 	default:
3536*4882a593Smuzhiyun 		rc = -EINVAL;
3537*4882a593Smuzhiyun 		goto out;
3538*4882a593Smuzhiyun 	}
3539*4882a593Smuzhiyun 
3540*4882a593Smuzhiyun 	if (unlikely(copy_from_user(&buf, uarg, size))) {
3541*4882a593Smuzhiyun 		dev_err(dev, "%s: copy_from_user() fail "
3542*4882a593Smuzhiyun 			"size=%lu cmd=%d (%s) uarg=%p\n",
3543*4882a593Smuzhiyun 			__func__, size, cmd, decode_hioctl(cmd), uarg);
3544*4882a593Smuzhiyun 		rc = -EFAULT;
3545*4882a593Smuzhiyun 		goto out;
3546*4882a593Smuzhiyun 	}
3547*4882a593Smuzhiyun 
3548*4882a593Smuzhiyun 	hdr = (struct ht_cxlflash_hdr *)&buf;
3549*4882a593Smuzhiyun 	if (hdr->version != HT_CXLFLASH_VERSION_0) {
3550*4882a593Smuzhiyun 		dev_dbg(dev, "%s: Version %u not supported for %s\n",
3551*4882a593Smuzhiyun 			__func__, hdr->version, decode_hioctl(cmd));
3552*4882a593Smuzhiyun 		rc = -EINVAL;
3553*4882a593Smuzhiyun 		goto out;
3554*4882a593Smuzhiyun 	}
3555*4882a593Smuzhiyun 
3556*4882a593Smuzhiyun 	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
3557*4882a593Smuzhiyun 		dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
3558*4882a593Smuzhiyun 		rc = -EINVAL;
3559*4882a593Smuzhiyun 		goto out;
3560*4882a593Smuzhiyun 	}
3561*4882a593Smuzhiyun 
3562*4882a593Smuzhiyun 	rc = do_ioctl(cfg, (void *)&buf);
3563*4882a593Smuzhiyun 	if (likely(!rc))
3564*4882a593Smuzhiyun 		if (unlikely(copy_to_user(uarg, &buf, size))) {
3565*4882a593Smuzhiyun 			dev_err(dev, "%s: copy_to_user() fail "
3566*4882a593Smuzhiyun 				"size=%lu cmd=%d (%s) uarg=%p\n",
3567*4882a593Smuzhiyun 				__func__, size, cmd, decode_hioctl(cmd), uarg);
3568*4882a593Smuzhiyun 			rc = -EFAULT;
3569*4882a593Smuzhiyun 		}
3570*4882a593Smuzhiyun 
3571*4882a593Smuzhiyun 	/* fall through to exit */
3572*4882a593Smuzhiyun 
3573*4882a593Smuzhiyun out:
3574*4882a593Smuzhiyun 	up_read(&cfg->ioctl_rwsem);
3575*4882a593Smuzhiyun 	if (unlikely(rc && known_ioctl))
3576*4882a593Smuzhiyun 		dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3577*4882a593Smuzhiyun 			__func__, decode_hioctl(cmd), cmd, rc);
3578*4882a593Smuzhiyun 	else
3579*4882a593Smuzhiyun 		dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3580*4882a593Smuzhiyun 			__func__, decode_hioctl(cmd), cmd, rc);
3581*4882a593Smuzhiyun 	return rc;
3582*4882a593Smuzhiyun }
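
/*
 * Drain sketch (illustrative only): since every ioctl above runs under the
 * read side of ioctl_rwsem, a path that must quiesce in-flight ioctls
 * (see the call to drain_ioctls() in cxlflash_pci_error_detected()) only
 * needs to take and release the write side, presumably along the lines of:
 *
 *	down_write(&cfg->ioctl_rwsem);
 *	up_write(&cfg->ioctl_rwsem);
 */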
3583*4882a593Smuzhiyun 
3584*4882a593Smuzhiyun /*
3585*4882a593Smuzhiyun  * Character device file operations
3586*4882a593Smuzhiyun  */
3587*4882a593Smuzhiyun static const struct file_operations cxlflash_chr_fops = {
3588*4882a593Smuzhiyun 	.owner          = THIS_MODULE,
3589*4882a593Smuzhiyun 	.open           = cxlflash_chr_open,
3590*4882a593Smuzhiyun 	.unlocked_ioctl	= cxlflash_chr_ioctl,
3591*4882a593Smuzhiyun 	.compat_ioctl	= compat_ptr_ioctl,
3592*4882a593Smuzhiyun };
3593*4882a593Smuzhiyun 
3594*4882a593Smuzhiyun /**
3595*4882a593Smuzhiyun  * init_chrdev() - initialize the character device for the host
3596*4882a593Smuzhiyun  * @cfg:	Internal structure associated with the host.
3597*4882a593Smuzhiyun  *
3598*4882a593Smuzhiyun  * Return: 0 on success, -errno on failure
3599*4882a593Smuzhiyun  */
3600*4882a593Smuzhiyun static int init_chrdev(struct cxlflash_cfg *cfg)
3601*4882a593Smuzhiyun {
3602*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
3603*4882a593Smuzhiyun 	struct device *char_dev;
3604*4882a593Smuzhiyun 	dev_t devno;
3605*4882a593Smuzhiyun 	int minor;
3606*4882a593Smuzhiyun 	int rc = 0;
3607*4882a593Smuzhiyun 
3608*4882a593Smuzhiyun 	minor = cxlflash_get_minor();
3609*4882a593Smuzhiyun 	if (unlikely(minor < 0)) {
3610*4882a593Smuzhiyun 		dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
3611*4882a593Smuzhiyun 		rc = -ENOSPC;
3612*4882a593Smuzhiyun 		goto out;
3613*4882a593Smuzhiyun 	}
3614*4882a593Smuzhiyun 
3615*4882a593Smuzhiyun 	devno = MKDEV(cxlflash_major, minor);
3616*4882a593Smuzhiyun 	cdev_init(&cfg->cdev, &cxlflash_chr_fops);
3617*4882a593Smuzhiyun 
3618*4882a593Smuzhiyun 	rc = cdev_add(&cfg->cdev, devno, 1);
3619*4882a593Smuzhiyun 	if (rc) {
3620*4882a593Smuzhiyun 		dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
3621*4882a593Smuzhiyun 		goto err1;
3622*4882a593Smuzhiyun 	}
3623*4882a593Smuzhiyun 
3624*4882a593Smuzhiyun 	char_dev = device_create(cxlflash_class, NULL, devno,
3625*4882a593Smuzhiyun 				 NULL, "cxlflash%d", minor);
3626*4882a593Smuzhiyun 	if (IS_ERR(char_dev)) {
3627*4882a593Smuzhiyun 		rc = PTR_ERR(char_dev);
3628*4882a593Smuzhiyun 		dev_err(dev, "%s: device_create failed rc=%d\n",
3629*4882a593Smuzhiyun 			__func__, rc);
3630*4882a593Smuzhiyun 		goto err2;
3631*4882a593Smuzhiyun 	}
3632*4882a593Smuzhiyun 
3633*4882a593Smuzhiyun 	cfg->chardev = char_dev;
3634*4882a593Smuzhiyun out:
3635*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3636*4882a593Smuzhiyun 	return rc;
3637*4882a593Smuzhiyun err2:
3638*4882a593Smuzhiyun 	cdev_del(&cfg->cdev);
3639*4882a593Smuzhiyun err1:
3640*4882a593Smuzhiyun 	cxlflash_put_minor(minor);
3641*4882a593Smuzhiyun 	goto out;
3642*4882a593Smuzhiyun }
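
/*
 * Sketch (illustrative only): cxlflash_get_minor()/cxlflash_put_minor()
 * live elsewhere in this file; conceptually they hand out minor numbers
 * from the module-wide cxlflash_minor bitmap, roughly along the lines of:
 *
 *	minor = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
 *	if (minor >= CXLFLASH_MAX_ADAPTERS)
 *		return -1;
 *	set_bit(minor, cxlflash_minor);
 */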
3643*4882a593Smuzhiyun 
3644*4882a593Smuzhiyun /**
3645*4882a593Smuzhiyun  * cxlflash_probe() - PCI entry point to add host
3646*4882a593Smuzhiyun  * @pdev:	PCI device associated with the host.
3647*4882a593Smuzhiyun  * @dev_id:	PCI device id associated with device.
3648*4882a593Smuzhiyun  *
3649*4882a593Smuzhiyun  * The device will initially start out in a 'probing' state and
3650*4882a593Smuzhiyun  * transition to the 'normal' state at the end of a successful
3651*4882a593Smuzhiyun  * probe. Should an EEH event occur during probe, the notification
3652*4882a593Smuzhiyun  * thread (error_detected()) will wait until the probe handler
3653*4882a593Smuzhiyun  * is nearly complete. At that time, the device will be moved to
3654*4882a593Smuzhiyun  * a 'probed' state and the EEH thread woken up to drive the slot
3655*4882a593Smuzhiyun  * reset and recovery (device moves to 'normal' state). Meanwhile,
3656*4882a593Smuzhiyun  * the probe will be allowed to exit successfully.
3657*4882a593Smuzhiyun  *
3658*4882a593Smuzhiyun  * Return: 0 on success, -errno on failure
3659*4882a593Smuzhiyun  */
3660*4882a593Smuzhiyun static int cxlflash_probe(struct pci_dev *pdev,
3661*4882a593Smuzhiyun 			  const struct pci_device_id *dev_id)
3662*4882a593Smuzhiyun {
3663*4882a593Smuzhiyun 	struct Scsi_Host *host;
3664*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = NULL;
3665*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
3666*4882a593Smuzhiyun 	struct dev_dependent_vals *ddv;
3667*4882a593Smuzhiyun 	int rc = 0;
3668*4882a593Smuzhiyun 	int k;
3669*4882a593Smuzhiyun 
3670*4882a593Smuzhiyun 	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
3671*4882a593Smuzhiyun 		__func__, pdev->irq);
3672*4882a593Smuzhiyun 
3673*4882a593Smuzhiyun 	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
3674*4882a593Smuzhiyun 	driver_template.max_sectors = ddv->max_sectors;
3675*4882a593Smuzhiyun 
3676*4882a593Smuzhiyun 	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
3677*4882a593Smuzhiyun 	if (!host) {
3678*4882a593Smuzhiyun 		dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
3679*4882a593Smuzhiyun 		rc = -ENOMEM;
3680*4882a593Smuzhiyun 		goto out;
3681*4882a593Smuzhiyun 	}
3682*4882a593Smuzhiyun 
3683*4882a593Smuzhiyun 	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
3684*4882a593Smuzhiyun 	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
3685*4882a593Smuzhiyun 	host->unique_id = host->host_no;
3686*4882a593Smuzhiyun 	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
3687*4882a593Smuzhiyun 
3688*4882a593Smuzhiyun 	cfg = shost_priv(host);
3689*4882a593Smuzhiyun 	cfg->state = STATE_PROBING;
3690*4882a593Smuzhiyun 	cfg->host = host;
3691*4882a593Smuzhiyun 	rc = alloc_mem(cfg);
3692*4882a593Smuzhiyun 	if (rc) {
3693*4882a593Smuzhiyun 		dev_err(dev, "%s: alloc_mem failed\n", __func__);
3694*4882a593Smuzhiyun 		rc = -ENOMEM;
3695*4882a593Smuzhiyun 		scsi_host_put(cfg->host);
3696*4882a593Smuzhiyun 		goto out;
3697*4882a593Smuzhiyun 	}
3698*4882a593Smuzhiyun 
3699*4882a593Smuzhiyun 	cfg->init_state = INIT_STATE_NONE;
3700*4882a593Smuzhiyun 	cfg->dev = pdev;
3701*4882a593Smuzhiyun 	cfg->cxl_fops = cxlflash_cxl_fops;
3702*4882a593Smuzhiyun 	cfg->ops = cxlflash_assign_ops(ddv);
3703*4882a593Smuzhiyun 	WARN_ON_ONCE(!cfg->ops);
3704*4882a593Smuzhiyun 
3705*4882a593Smuzhiyun 	/*
3706*4882a593Smuzhiyun 	 * Promoted LUNs move to the top of the LUN table. The rest stay on
3707*4882a593Smuzhiyun 	 * the bottom half. The bottom half grows from the end (index = 255),
3708*4882a593Smuzhiyun 	 * whereas the top half grows from the beginning (index = 0).
3709*4882a593Smuzhiyun 	 *
3710*4882a593Smuzhiyun 	 * Initialize the last LUN index for all possible ports.
3711*4882a593Smuzhiyun 	 */
3712*4882a593Smuzhiyun 	cfg->promote_lun_index = 0;
3713*4882a593Smuzhiyun 
3714*4882a593Smuzhiyun 	for (k = 0; k < MAX_FC_PORTS; k++)
3715*4882a593Smuzhiyun 		cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
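
	/*
	 * For illustration: assuming CXLFLASH_NUM_VLUNS is 512 (consistent
	 * with the "index = 255" noted above), each last_lun_index[] entry
	 * starts at 255 and regular LUNs fill downward from there, while
	 * promoted LUNs fill upward from promote_lun_index = 0.
	 */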
3716*4882a593Smuzhiyun 
3717*4882a593Smuzhiyun 	cfg->dev_id = (struct pci_device_id *)dev_id;
3718*4882a593Smuzhiyun 
3719*4882a593Smuzhiyun 	init_waitqueue_head(&cfg->tmf_waitq);
3720*4882a593Smuzhiyun 	init_waitqueue_head(&cfg->reset_waitq);
3721*4882a593Smuzhiyun 
3722*4882a593Smuzhiyun 	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
3723*4882a593Smuzhiyun 	cfg->lr_state = LINK_RESET_INVALID;
3724*4882a593Smuzhiyun 	cfg->lr_port = -1;
3725*4882a593Smuzhiyun 	spin_lock_init(&cfg->tmf_slock);
3726*4882a593Smuzhiyun 	mutex_init(&cfg->ctx_tbl_list_mutex);
3727*4882a593Smuzhiyun 	mutex_init(&cfg->ctx_recovery_mutex);
3728*4882a593Smuzhiyun 	init_rwsem(&cfg->ioctl_rwsem);
3729*4882a593Smuzhiyun 	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
3730*4882a593Smuzhiyun 	INIT_LIST_HEAD(&cfg->lluns);
3731*4882a593Smuzhiyun 
3732*4882a593Smuzhiyun 	pci_set_drvdata(pdev, cfg);
3733*4882a593Smuzhiyun 
3734*4882a593Smuzhiyun 	rc = init_pci(cfg);
3735*4882a593Smuzhiyun 	if (rc) {
3736*4882a593Smuzhiyun 		dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
3737*4882a593Smuzhiyun 		goto out_remove;
3738*4882a593Smuzhiyun 	}
3739*4882a593Smuzhiyun 	cfg->init_state = INIT_STATE_PCI;
3740*4882a593Smuzhiyun 
3741*4882a593Smuzhiyun 	cfg->afu_cookie = cfg->ops->create_afu(pdev);
3742*4882a593Smuzhiyun 	if (unlikely(!cfg->afu_cookie)) {
3743*4882a593Smuzhiyun 		dev_err(dev, "%s: create_afu failed\n", __func__);
3744*4882a593Smuzhiyun 		rc = -ENOMEM;
3745*4882a593Smuzhiyun 		goto out_remove;
3746*4882a593Smuzhiyun 	}
3747*4882a593Smuzhiyun 
3748*4882a593Smuzhiyun 	rc = init_afu(cfg);
3749*4882a593Smuzhiyun 	if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
3750*4882a593Smuzhiyun 		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
3751*4882a593Smuzhiyun 		goto out_remove;
3752*4882a593Smuzhiyun 	}
3753*4882a593Smuzhiyun 	cfg->init_state = INIT_STATE_AFU;
3754*4882a593Smuzhiyun 
3755*4882a593Smuzhiyun 	rc = init_scsi(cfg);
3756*4882a593Smuzhiyun 	if (rc) {
3757*4882a593Smuzhiyun 		dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
3758*4882a593Smuzhiyun 		goto out_remove;
3759*4882a593Smuzhiyun 	}
3760*4882a593Smuzhiyun 	cfg->init_state = INIT_STATE_SCSI;
3761*4882a593Smuzhiyun 
3762*4882a593Smuzhiyun 	rc = init_chrdev(cfg);
3763*4882a593Smuzhiyun 	if (rc) {
3764*4882a593Smuzhiyun 		dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
3765*4882a593Smuzhiyun 		goto out_remove;
3766*4882a593Smuzhiyun 	}
3767*4882a593Smuzhiyun 	cfg->init_state = INIT_STATE_CDEV;
3768*4882a593Smuzhiyun 
3769*4882a593Smuzhiyun 	if (wq_has_sleeper(&cfg->reset_waitq)) {
3770*4882a593Smuzhiyun 		cfg->state = STATE_PROBED;
3771*4882a593Smuzhiyun 		wake_up_all(&cfg->reset_waitq);
3772*4882a593Smuzhiyun 	} else
3773*4882a593Smuzhiyun 		cfg->state = STATE_NORMAL;
3774*4882a593Smuzhiyun out:
3775*4882a593Smuzhiyun 	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3776*4882a593Smuzhiyun 	return rc;
3777*4882a593Smuzhiyun 
3778*4882a593Smuzhiyun out_remove:
3779*4882a593Smuzhiyun 	cfg->state = STATE_PROBED;
3780*4882a593Smuzhiyun 	cxlflash_remove(pdev);
3781*4882a593Smuzhiyun 	goto out;
3782*4882a593Smuzhiyun }
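
/*
 * State flow for the probe path described above, as implemented here:
 *
 *	STATE_PROBING --(probe completes, no EEH waiter)--> STATE_NORMAL
 *	STATE_PROBING --(EEH arrives during probe)--------> STATE_PROBED
 *	STATE_PROBED  --(EEH slot reset + resume)---------> STATE_NORMAL
 *	any init failure --> STATE_PROBED, then cxlflash_remove()
 */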
3783*4882a593Smuzhiyun 
3784*4882a593Smuzhiyun /**
3785*4882a593Smuzhiyun  * cxlflash_pci_error_detected() - called when a PCI error is detected
3786*4882a593Smuzhiyun  * @pdev:	PCI device struct.
3787*4882a593Smuzhiyun  * @state:	PCI channel state.
3788*4882a593Smuzhiyun  *
3789*4882a593Smuzhiyun  * When an EEH occurs during an active reset, wait until the reset is
3790*4882a593Smuzhiyun  * complete and then take action based upon the device state.
3791*4882a593Smuzhiyun  *
3792*4882a593Smuzhiyun  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
3793*4882a593Smuzhiyun  */
3794*4882a593Smuzhiyun static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
3795*4882a593Smuzhiyun 						    pci_channel_state_t state)
3796*4882a593Smuzhiyun {
3797*4882a593Smuzhiyun 	int rc = 0;
3798*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3799*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
3800*4882a593Smuzhiyun 
3801*4882a593Smuzhiyun 	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
3802*4882a593Smuzhiyun 
3803*4882a593Smuzhiyun 	switch (state) {
3804*4882a593Smuzhiyun 	case pci_channel_io_frozen:
3805*4882a593Smuzhiyun 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
3806*4882a593Smuzhiyun 					     cfg->state != STATE_PROBING);
3807*4882a593Smuzhiyun 		if (cfg->state == STATE_FAILTERM)
3808*4882a593Smuzhiyun 			return PCI_ERS_RESULT_DISCONNECT;
3809*4882a593Smuzhiyun 
3810*4882a593Smuzhiyun 		cfg->state = STATE_RESET;
3811*4882a593Smuzhiyun 		scsi_block_requests(cfg->host);
3812*4882a593Smuzhiyun 		drain_ioctls(cfg);
3813*4882a593Smuzhiyun 		rc = cxlflash_mark_contexts_error(cfg);
3814*4882a593Smuzhiyun 		if (unlikely(rc))
3815*4882a593Smuzhiyun 			dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
3816*4882a593Smuzhiyun 				__func__, rc);
3817*4882a593Smuzhiyun 		term_afu(cfg);
3818*4882a593Smuzhiyun 		return PCI_ERS_RESULT_NEED_RESET;
3819*4882a593Smuzhiyun 	case pci_channel_io_perm_failure:
3820*4882a593Smuzhiyun 		cfg->state = STATE_FAILTERM;
3821*4882a593Smuzhiyun 		wake_up_all(&cfg->reset_waitq);
3822*4882a593Smuzhiyun 		scsi_unblock_requests(cfg->host);
3823*4882a593Smuzhiyun 		return PCI_ERS_RESULT_DISCONNECT;
3824*4882a593Smuzhiyun 	default:
3825*4882a593Smuzhiyun 		break;
3826*4882a593Smuzhiyun 	}
3827*4882a593Smuzhiyun 	return PCI_ERS_RESULT_NEED_RESET;
3828*4882a593Smuzhiyun }
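
/*
 * Recovery flow sketch (illustrative): for a recoverable EEH event the PCI
 * core invokes the handlers registered in cxlflash_err_handler below, so a
 * frozen channel typically progresses as:
 *
 *	cxlflash_pci_error_detected() -> PCI_ERS_RESULT_NEED_RESET
 *	cxlflash_pci_slot_reset()     -> init_afu(), PCI_ERS_RESULT_RECOVERED
 *	cxlflash_pci_resume()         -> STATE_NORMAL, requests unblocked
 */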
3829*4882a593Smuzhiyun 
3830*4882a593Smuzhiyun /**
3831*4882a593Smuzhiyun  * cxlflash_pci_slot_reset() - called when PCI slot has been reset
3832*4882a593Smuzhiyun  * @pdev:	PCI device struct.
3833*4882a593Smuzhiyun  *
3834*4882a593Smuzhiyun  * This routine is called by the pci error recovery code after the PCI
3835*4882a593Smuzhiyun  * slot has been reset, just before we should resume normal operations.
3836*4882a593Smuzhiyun  *
3837*4882a593Smuzhiyun  * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
3838*4882a593Smuzhiyun  */
3839*4882a593Smuzhiyun static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
3840*4882a593Smuzhiyun {
3841*4882a593Smuzhiyun 	int rc = 0;
3842*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3843*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
3844*4882a593Smuzhiyun 
3845*4882a593Smuzhiyun 	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3846*4882a593Smuzhiyun 
3847*4882a593Smuzhiyun 	rc = init_afu(cfg);
3848*4882a593Smuzhiyun 	if (unlikely(rc)) {
3849*4882a593Smuzhiyun 		dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
3850*4882a593Smuzhiyun 		return PCI_ERS_RESULT_DISCONNECT;
3851*4882a593Smuzhiyun 	}
3852*4882a593Smuzhiyun 
3853*4882a593Smuzhiyun 	return PCI_ERS_RESULT_RECOVERED;
3854*4882a593Smuzhiyun }
3855*4882a593Smuzhiyun 
3856*4882a593Smuzhiyun /**
3857*4882a593Smuzhiyun  * cxlflash_pci_resume() - called when normal operation can resume
3858*4882a593Smuzhiyun  * @pdev:	PCI device struct
3859*4882a593Smuzhiyun  */
3860*4882a593Smuzhiyun static void cxlflash_pci_resume(struct pci_dev *pdev)
3861*4882a593Smuzhiyun {
3862*4882a593Smuzhiyun 	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3863*4882a593Smuzhiyun 	struct device *dev = &cfg->dev->dev;
3864*4882a593Smuzhiyun 
3865*4882a593Smuzhiyun 	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3866*4882a593Smuzhiyun 
3867*4882a593Smuzhiyun 	cfg->state = STATE_NORMAL;
3868*4882a593Smuzhiyun 	wake_up_all(&cfg->reset_waitq);
3869*4882a593Smuzhiyun 	scsi_unblock_requests(cfg->host);
3870*4882a593Smuzhiyun }
3871*4882a593Smuzhiyun 
3872*4882a593Smuzhiyun /**
3873*4882a593Smuzhiyun  * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
3874*4882a593Smuzhiyun  * @dev:	Character device.
3875*4882a593Smuzhiyun  * @mode:	Mode that can be used to verify access.
3876*4882a593Smuzhiyun  *
3877*4882a593Smuzhiyun  * Return: Allocated string describing the devtmpfs structure.
3878*4882a593Smuzhiyun  */
3879*4882a593Smuzhiyun static char *cxlflash_devnode(struct device *dev, umode_t *mode)
3880*4882a593Smuzhiyun {
3881*4882a593Smuzhiyun 	return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
3882*4882a593Smuzhiyun }
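
/*
 * Example: for the adapter device named "cxlflash0" by device_create() in
 * init_chrdev(), this callback returns "cxlflash/cxlflash0", so devtmpfs
 * exposes the node as /dev/cxlflash/cxlflash0.
 */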
3883*4882a593Smuzhiyun 
3884*4882a593Smuzhiyun /**
3885*4882a593Smuzhiyun  * cxlflash_class_init() - create character device class
3886*4882a593Smuzhiyun  *
3887*4882a593Smuzhiyun  * Return: 0 on success, -errno on failure
3888*4882a593Smuzhiyun  */
3889*4882a593Smuzhiyun static int cxlflash_class_init(void)
3890*4882a593Smuzhiyun {
3891*4882a593Smuzhiyun 	dev_t devno;
3892*4882a593Smuzhiyun 	int rc = 0;
3893*4882a593Smuzhiyun 
3894*4882a593Smuzhiyun 	rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
3895*4882a593Smuzhiyun 	if (unlikely(rc)) {
3896*4882a593Smuzhiyun 		pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
3897*4882a593Smuzhiyun 		goto out;
3898*4882a593Smuzhiyun 	}
3899*4882a593Smuzhiyun 
3900*4882a593Smuzhiyun 	cxlflash_major = MAJOR(devno);
3901*4882a593Smuzhiyun 
3902*4882a593Smuzhiyun 	cxlflash_class = class_create(THIS_MODULE, "cxlflash");
3903*4882a593Smuzhiyun 	if (IS_ERR(cxlflash_class)) {
3904*4882a593Smuzhiyun 		rc = PTR_ERR(cxlflash_class);
3905*4882a593Smuzhiyun 		pr_err("%s: class_create failed rc=%d\n", __func__, rc);
3906*4882a593Smuzhiyun 		goto err;
3907*4882a593Smuzhiyun 	}
3908*4882a593Smuzhiyun 
3909*4882a593Smuzhiyun 	cxlflash_class->devnode = cxlflash_devnode;
3910*4882a593Smuzhiyun out:
3911*4882a593Smuzhiyun 	pr_debug("%s: returning rc=%d\n", __func__, rc);
3912*4882a593Smuzhiyun 	return rc;
3913*4882a593Smuzhiyun err:
3914*4882a593Smuzhiyun 	unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3915*4882a593Smuzhiyun 	goto out;
3916*4882a593Smuzhiyun }
3917*4882a593Smuzhiyun 
3918*4882a593Smuzhiyun /**
3919*4882a593Smuzhiyun  * cxlflash_class_exit() - destroy character device class
3920*4882a593Smuzhiyun  */
3921*4882a593Smuzhiyun static void cxlflash_class_exit(void)
3922*4882a593Smuzhiyun {
3923*4882a593Smuzhiyun 	dev_t devno = MKDEV(cxlflash_major, 0);
3924*4882a593Smuzhiyun 
3925*4882a593Smuzhiyun 	class_destroy(cxlflash_class);
3926*4882a593Smuzhiyun 	unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3927*4882a593Smuzhiyun }
3928*4882a593Smuzhiyun 
3929*4882a593Smuzhiyun static const struct pci_error_handlers cxlflash_err_handler = {
3930*4882a593Smuzhiyun 	.error_detected = cxlflash_pci_error_detected,
3931*4882a593Smuzhiyun 	.slot_reset = cxlflash_pci_slot_reset,
3932*4882a593Smuzhiyun 	.resume = cxlflash_pci_resume,
3933*4882a593Smuzhiyun };
3934*4882a593Smuzhiyun 
3935*4882a593Smuzhiyun /*
3936*4882a593Smuzhiyun  * PCI device structure
3937*4882a593Smuzhiyun  */
3938*4882a593Smuzhiyun static struct pci_driver cxlflash_driver = {
3939*4882a593Smuzhiyun 	.name = CXLFLASH_NAME,
3940*4882a593Smuzhiyun 	.id_table = cxlflash_pci_table,
3941*4882a593Smuzhiyun 	.probe = cxlflash_probe,
3942*4882a593Smuzhiyun 	.remove = cxlflash_remove,
3943*4882a593Smuzhiyun 	.shutdown = cxlflash_remove,
3944*4882a593Smuzhiyun 	.err_handler = &cxlflash_err_handler,
3945*4882a593Smuzhiyun };
3946*4882a593Smuzhiyun 
3947*4882a593Smuzhiyun /**
3948*4882a593Smuzhiyun  * init_cxlflash() - module entry point
3949*4882a593Smuzhiyun  *
3950*4882a593Smuzhiyun  * Return: 0 on success, -errno on failure
3951*4882a593Smuzhiyun  */
3952*4882a593Smuzhiyun static int __init init_cxlflash(void)
3953*4882a593Smuzhiyun {
3954*4882a593Smuzhiyun 	int rc;
3955*4882a593Smuzhiyun 
3956*4882a593Smuzhiyun 	check_sizes();
3957*4882a593Smuzhiyun 	cxlflash_list_init();
3958*4882a593Smuzhiyun 	rc = cxlflash_class_init();
3959*4882a593Smuzhiyun 	if (unlikely(rc))
3960*4882a593Smuzhiyun 		goto out;
3961*4882a593Smuzhiyun 
3962*4882a593Smuzhiyun 	rc = pci_register_driver(&cxlflash_driver);
3963*4882a593Smuzhiyun 	if (unlikely(rc))
3964*4882a593Smuzhiyun 		goto err;
3965*4882a593Smuzhiyun out:
3966*4882a593Smuzhiyun 	pr_debug("%s: returning rc=%d\n", __func__, rc);
3967*4882a593Smuzhiyun 	return rc;
3968*4882a593Smuzhiyun err:
3969*4882a593Smuzhiyun 	cxlflash_class_exit();
3970*4882a593Smuzhiyun 	goto out;
3971*4882a593Smuzhiyun }
3972*4882a593Smuzhiyun 
3973*4882a593Smuzhiyun /**
3974*4882a593Smuzhiyun  * exit_cxlflash() - module exit point
3975*4882a593Smuzhiyun  */
3976*4882a593Smuzhiyun static void __exit exit_cxlflash(void)
3977*4882a593Smuzhiyun {
3978*4882a593Smuzhiyun 	cxlflash_term_global_luns();
3979*4882a593Smuzhiyun 	cxlflash_free_errpage();
3980*4882a593Smuzhiyun 
3981*4882a593Smuzhiyun 	pci_unregister_driver(&cxlflash_driver);
3982*4882a593Smuzhiyun 	cxlflash_class_exit();
3983*4882a593Smuzhiyun }
3984*4882a593Smuzhiyun 
3985*4882a593Smuzhiyun module_init(init_cxlflash);
3986*4882a593Smuzhiyun module_exit(exit_cxlflash);
3987