// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux MegaRAID driver for SAS based RAID controllers
 *
 * Copyright (c) 2003-2013 LSI Corporation
 * Copyright (c) 2013-2016 Avago Technologies
 * Copyright (c) 2016-2018 Broadcom Inc.
 *
 * Authors: Broadcom Inc.
 *          Sreenivas Bagalkote
 *          Sumant Patro
 *          Bo Yang
 *          Adam Radford
 *          Kashyap Desai <kashyap.desai@broadcom.com>
 *          Sumit Saxena <sumit.saxena@broadcom.com>
 *
 * Send feedback to: megaraidlinux.pdl@broadcom.com
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include "megaraid_sas_fusion.h"
#include "megaraid_sas.h"

/*
 * Number of sectors per IO command
 * Will be set in megasas_init_mfi if user does not provide
 */
static unsigned int max_sectors;
module_param_named(max_sectors, max_sectors, int, 0444);
MODULE_PARM_DESC(max_sectors,
	"Maximum number of sectors per IO command");

static int msix_disable;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");

static unsigned int msix_vectors;
module_param(msix_vectors, int, 0444);
MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");

static int allow_vf_ioctls;
module_param(allow_vf_ioctls, int, 0444);
MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");

static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
module_param(throttlequeuedepth, int, 0444);
MODULE_PARM_DESC(throttlequeuedepth,
	"Adapter queue depth when throttled due to I/O timeout. Default: 16");

unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, 0444);
MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int rdpq_enable = 1;
module_param(rdpq_enable, int, 0444);
MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");

unsigned int dual_qdepth_disable;
module_param(dual_qdepth_disable, int, 0444);
MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");

static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
module_param(scmd_timeout, int, 0444);
MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");

int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
	"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
	"interrupt coalescing is enabled only on high iops queues\n\t\t"
	"1 - iops: High iops queues are not allocated &\n\t\t"
	"interrupt coalescing is enabled on all queues\n\t\t"
	"2 - latency: High iops queues are not allocated &\n\t\t"
	"interrupt coalescing is disabled on all queues\n\t\t"
	"default mode is 'balanced'"
	);

int event_log_level = MFI_EVT_CLASS_CRITICAL;
module_param(event_log_level, int, 0644);
MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");

unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
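
/*
 * Illustrative example (values are hypothetical, not part of the original
 * source): any of the parameters above can be supplied at load time, e.g.
 *
 *	modprobe megaraid_sas msix_vectors=16 perf_mode=0 event_log_level=2
 */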

MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");

int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
static int megasas_get_pd_list(struct megasas_instance *instance);
static int megasas_ld_list_query(struct megasas_instance *instance,
				 u8 query_type);
static int megasas_issue_init_mfi(struct megasas_instance *instance);
static int megasas_register_aen(struct megasas_instance *instance,
				u32 seq_num, u32 class_locale_word);
static void megasas_get_pd_info(struct megasas_instance *instance,
				struct scsi_device *sdev);
static void
megasas_set_ld_removed_by_fw(struct megasas_instance *instance);

/*
 * PCI ID table for all supported controllers
 */
static struct pci_device_id megasas_pci_table[] = {

	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
	/* ppc IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
	/* gen2*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
	/* gen2*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
	/* skinny*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
	/* skinny*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
	/* xscale IOP, vega */
	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
	/* xscale IOP */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
	/* Fusion */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
	/* Plasma */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
	/* Invader */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
	/* Fury */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
	/* Intruder */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
	/* Intruder 24 port*/
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
	/* VENTURA */
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
	{}
};

MODULE_DEVICE_TABLE(pci, megasas_pci_table);

static int megasas_mgmt_majorno;
struct megasas_mgmt_info megasas_mgmt_info;
static struct fasync_struct *megasas_async_queue;
static DEFINE_MUTEX(megasas_async_queue_mutex);

static int megasas_poll_wait_aen;
static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
static u32 support_poll_for_event;
u32 megasas_dbg_lvl;
static u32 support_device_change;
static bool support_nvme_encapsulation;
static bool support_pci_lane_margining;

/* define lock for aen poll */
static spinlock_t poll_aen_lock;

extern struct dentry *megasas_debugfs_root;

void
megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
		     u8 alt_status);
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
		       struct megasas_register_set __iomem *reg_set);
static irqreturn_t megasas_isr(int irq, void *devp);
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance);
u32
megasas_build_and_issue_cmd(struct megasas_instance *instance,
			    struct scsi_cmnd *scmd);
static void megasas_complete_cmd_dpc(unsigned long instance_addr);
int
wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
	      int seconds);
void megasas_fusion_ocr_wq(struct work_struct *work);
static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
					 int initial);
static int
megasas_set_dma_mask(struct megasas_instance *instance);
static int
megasas_alloc_ctrl_mem(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_mem(struct megasas_instance *instance);
static inline int
megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
static inline void
megasas_init_ctrl_params(struct megasas_instance *instance);

u32 megasas_readl(struct megasas_instance *instance,
		  const volatile void __iomem *addr)
{
	u32 i = 0, ret_val;
	/*
	 * Due to a HW errata in Aero controllers, reads to certain
	 * Fusion registers could intermittently return all zeroes.
	 * This behavior is transient in nature and subsequent reads will
	 * return valid value. As a workaround in driver, retry readl for
	 * upto three times until a non-zero value is read.
	 */
	if (instance->adapter_type == AERO_SERIES) {
		do {
			ret_val = readl(addr);
			i++;
		} while (ret_val == 0 && i < 3);
		return ret_val;
	} else {
		return readl(addr);
	}
}
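
/*
 * Register reads that are sensitive to the Aero erratum above (for example
 * firmware status / scratch-pad reads on Fusion-class adapters) are expected
 * to go through megasas_readl() rather than calling readl() directly.
 */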

/**
 * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs
 * @instance: Adapter soft state
 * @dcmd: DCMD frame inside MFI command
 * @dma_addr: DMA address of buffer to be passed to FW
 * @dma_len: Length of DMA buffer to be passed to FW
 * @return: void
 */
void megasas_set_dma_settings(struct megasas_instance *instance,
			      struct megasas_dcmd_frame *dcmd,
			      dma_addr_t dma_addr, u32 dma_len)
{
	if (instance->consistent_mask_64bit) {
		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);

	} else {
		dcmd->sgl.sge32[0].phys_addr =
				cpu_to_le32(lower_32_bits(dma_addr));
		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
		dcmd->flags = cpu_to_le16(dcmd->flags);
	}
}
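
/*
 * Illustrative caller sketch (hypothetical variable names, not code from this
 * file): a DCMD issuer fills the frame and lets the helper above pick the
 * 32-bit vs 64-bit SGL layout:
 *
 *	dcmd->flags = MFI_FRAME_DIR_READ;
 *	dcmd->data_xfer_len = cpu_to_le32(buf_len);
 *	megasas_set_dma_settings(instance, dcmd, buf_dma_handle, buf_len);
 */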

static void
megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	instance->instancet->fire_cmd(instance,
		cmd->frame_phys_addr, 0, instance->reg_set);
	return;
}

/**
 * megasas_get_cmd - Get a command from the free pool
 * @instance: Adapter soft state
 *
 * Returns a free command from the pool
 */
struct megasas_cmd *megasas_get_cmd(struct megasas_instance
				    *instance)
{
	unsigned long flags;
	struct megasas_cmd *cmd = NULL;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

	if (!list_empty(&instance->cmd_pool)) {
		cmd = list_entry((&instance->cmd_pool)->next,
				 struct megasas_cmd, list);
		list_del_init(&cmd->list);
	} else {
		dev_err(&instance->pdev->dev, "Command pool empty!\n");
	}

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
	return cmd;
}

/**
 * megasas_return_cmd - Return a cmd to free command pool
 * @instance: Adapter soft state
 * @cmd: Command packet to be returned to free command pool
 */
void
megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
{
	unsigned long flags;
	u32 blk_tags;
	struct megasas_cmd_fusion *cmd_fusion;
	struct fusion_context *fusion = instance->ctrl_context;

	/* This flag is used only for fusion adapter.
	 * Wait for Interrupt for Polled mode DCMD
	 */
	if (cmd->flags & DRV_DCMD_POLLED_MODE)
		return;

	spin_lock_irqsave(&instance->mfi_pool_lock, flags);

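	/*
	 * On Fusion adapters, MFI frames are carried by entries at the tail
	 * of the fusion command list: indices at and beyond max_scsi_cmds are
	 * reserved for MFI pass-through, so map this MFI command to its
	 * matching fusion command before returning it.
	 */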
	if (fusion) {
		blk_tags = instance->max_scsi_cmds + cmd->index;
		cmd_fusion = fusion->cmd_list[blk_tags];
		megasas_return_cmd_fusion(instance, cmd_fusion);
	}
	cmd->scmd = NULL;
	cmd->frame_count = 0;
	cmd->flags = 0;
	memset(cmd->frame, 0, instance->mfi_frame_size);
	cmd->frame->io.context = cpu_to_le32(cmd->index);
	if (!fusion && reset_devices)
		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
	list_add(&cmd->list, (&instance->cmd_pool)->next);

	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);

}

static const char *
format_timestamp(uint32_t timestamp)
{
	static char buffer[32];

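	/*
	 * Firmware reports boot-relative timestamps with the top byte set to
	 * 0xff; the low 24 bits then carry seconds since controller boot.
	 * Other values are printed as plain seconds.
	 */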
	if ((timestamp & 0xff000000) == 0xff000000)
		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
			 0x00ffffff);
	else
		snprintf(buffer, sizeof(buffer), "%us", timestamp);
	return buffer;
}

static const char *
format_class(int8_t class)
{
	static char buffer[6];

	switch (class) {
	case MFI_EVT_CLASS_DEBUG:
		return "debug";
	case MFI_EVT_CLASS_PROGRESS:
		return "progress";
	case MFI_EVT_CLASS_INFO:
		return "info";
	case MFI_EVT_CLASS_WARNING:
		return "WARN";
	case MFI_EVT_CLASS_CRITICAL:
		return "CRIT";
	case MFI_EVT_CLASS_FATAL:
		return "FATAL";
	case MFI_EVT_CLASS_DEAD:
		return "DEAD";
	default:
		snprintf(buffer, sizeof(buffer), "%d", class);
		return buffer;
	}
}

/**
 * megasas_decode_evt: Decode FW AEN event and print critical event
 * for information.
 * @instance: Adapter soft state
 */
static void
megasas_decode_evt(struct megasas_instance *instance)
{
	struct megasas_evt_detail *evt_detail = instance->evt_detail;
	union megasas_evt_class_locale class_locale;
	class_locale.word = le32_to_cpu(evt_detail->cl.word);

	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
		printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
		event_log_level = MFI_EVT_CLASS_CRITICAL;
	}

	if (class_locale.members.class >= event_log_level)
		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
			 le32_to_cpu(evt_detail->seq_num),
			 format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
			 (class_locale.members.locale),
			 format_class(class_locale.members.class),
			 evt_detail->description);

	if (megasas_dbg_lvl & LD_PD_DEBUG)
		dev_info(&instance->pdev->dev,
			 "evt_detail.args.ld.target_id/index %d/%d\n",
			 evt_detail->args.ld.target_id, evt_detail->args.ld.ld_index);

}

/*
 * The following functions are defined for xscale
 * (deviceid : 1064R, PERC5) controllers
 */

/**
 * megasas_enable_intr_xscale - Enables interrupts
 * @instance: Adapter soft state
 */
static inline void
megasas_enable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_xscale - Disables interrupt
 * @instance: Adapter soft state
 */
static inline void
megasas_disable_intr_xscale(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0x1f;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_xscale - returns the current FW status value
 * @instance: Adapter soft state
 */
static u32
megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_msg_0);
}
/**
 * megasas_clear_interrupt_xscale - Check & clear interrupt
 * @instance: Adapter soft state
 */
static int
megasas_clear_intr_xscale(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_OB_INTR_STATUS_MASK)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_intr_status);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_xscale - Sends command to the FW
 * @instance: Adapter soft state
 * @frame_phys_addr : Physical address of cmd
 * @frame_count : Number of frames for the command
 * @regs : MFI register set
 */
static inline void
megasas_fire_cmd_xscale(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
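	/*
	 * The xscale inbound queue port takes the frame address and frame
	 * count packed into a single 32-bit doorbell write: the address is
	 * shifted right by 3 so its low bits can carry the frame count.
	 */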
	writel((frame_phys_addr >> 3)|(frame_count),
	       &(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_xscale - For controller reset
 * @instance: Adapter soft state
 * @regs: MFI register set
 */
static int
megasas_adp_reset_xscale(struct megasas_instance *instance,
			 struct megasas_register_set __iomem *regs)
{
	u32 i;
	u32 pcidata;

	writel(MFI_ADP_RESET, &regs->inbound_doorbell);

	for (i = 0; i < 3; i++)
		msleep(1000); /* sleep for 3 secs */
	pcidata = 0;
	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
	if (pcidata & 0x2) {
		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
		pcidata &= ~0x2;
		pci_write_config_dword(instance->pdev,
				       MFI_1068_PCSR_OFFSET, pcidata);

		for (i = 0; i < 2; i++)
			msleep(1000); /* need to wait 2 secs again */

		pcidata = 0;
		pci_read_config_dword(instance->pdev,
				      MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
			pcidata = 0;
			pci_write_config_dword(instance->pdev,
					       MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
		}
	}
	return 0;
}

/**
 * megasas_check_reset_xscale - For controller reset check
 * @instance: Adapter soft state
 * @regs: MFI register set
 */
static int
megasas_check_reset_xscale(struct megasas_instance *instance,
			   struct megasas_register_set __iomem *regs)
{
	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
	    (le32_to_cpu(*instance->consumer) ==
	     MEGASAS_ADPRESET_INPROG_SIGN))
		return 1;
	return 0;
}

static struct megasas_instance_template megasas_instance_template_xscale = {

	.fire_cmd = megasas_fire_cmd_xscale,
	.enable_intr = megasas_enable_intr_xscale,
	.disable_intr = megasas_disable_intr_xscale,
	.clear_intr = megasas_clear_intr_xscale,
	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_xscale,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/*
 * This is the end of set of functions & definitions specific
 * to xscale (deviceid : 1064R, PERC5) controllers
 */

/*
 * The following functions are defined for ppc (deviceid : 0x60)
 * controllers
 */

/**
 * megasas_enable_intr_ppc - Enables interrupts
 * @instance: Adapter soft state
 */
static inline void
megasas_enable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	writel(~0x80000000, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_ppc - Disable interrupt
 * @instance: Adapter soft state
 */
static inline void
megasas_disable_intr_ppc(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_ppc - returns the current FW status value
 * @instance: Adapter soft state
 */
static u32
megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_interrupt_ppc - Check & clear interrupt
 * @instance: Adapter soft state
 */
static int
megasas_clear_intr_ppc(struct megasas_instance *instance)
{
	u32 status, mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_doorbell_clear);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_ppc - Sends command to the FW
 * @instance: Adapter soft state
 * @frame_phys_addr: Physical address of cmd
 * @frame_count: Number of frames for the command
 * @regs: MFI register set
 */
static inline void
megasas_fire_cmd_ppc(struct megasas_instance *instance,
		     dma_addr_t frame_phys_addr,
		     u32 frame_count,
		     struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
	       &(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_ppc - For controller reset check
 * @instance: Adapter soft state
 * @regs: MFI register set
 */
static int
megasas_check_reset_ppc(struct megasas_instance *instance,
			struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_ppc = {

	.fire_cmd = megasas_fire_cmd_ppc,
	.enable_intr = megasas_enable_intr_ppc,
	.disable_intr = megasas_disable_intr_ppc,
	.clear_intr = megasas_clear_intr_ppc,
	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
	.adp_reset = megasas_adp_reset_xscale,
	.check_reset = megasas_check_reset_ppc,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};

/**
 * megasas_enable_intr_skinny - Enables interrupts
 * @instance: Adapter soft state
 */
static inline void
megasas_enable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);

	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_skinny - Disables interrupt
 * @instance: Adapter soft state
 */
static inline void
megasas_disable_intr_skinny(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_skinny - returns the current FW status value
 * @instance: Adapter soft state
 */
static u32
megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_interrupt_skinny - Check & clear interrupt
 * @instance: Adapter soft state
 */
static int
megasas_clear_intr_skinny(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
		return 0;
	}

	/*
	 * Check if it is our interrupt
	 */
	if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
	    MFI_STATE_FAULT) {
		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	} else
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;

	/*
	 * Clear the interrupt by writing back the same value
	 */
	writel(status, &regs->outbound_intr_status);

	/*
	 * dummy read to flush PCI
	 */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_skinny - Sends command to the FW
 * @instance: Adapter soft state
 * @frame_phys_addr: Physical address of cmd
 * @frame_count: Number of frames for the command
 * @regs: MFI register set
 */
static inline void
megasas_fire_cmd_skinny(struct megasas_instance *instance,
			dma_addr_t frame_phys_addr,
			u32 frame_count,
			struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
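	/*
	 * Skinny controllers take the 64-bit frame address split across the
	 * high and low inbound queue ports; the low-port write also carries
	 * the frame count and posts the command.
	 */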
	writel(upper_32_bits(frame_phys_addr),
	       &(regs)->inbound_high_queue_port);
	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
	       &(regs)->inbound_low_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_check_reset_skinny - For controller reset check
 * @instance: Adapter soft state
 * @regs: MFI register set
 */
static int
megasas_check_reset_skinny(struct megasas_instance *instance,
			   struct megasas_register_set __iomem *regs)
{
	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
		return 1;

	return 0;
}

static struct megasas_instance_template megasas_instance_template_skinny = {

	.fire_cmd = megasas_fire_cmd_skinny,
	.enable_intr = megasas_enable_intr_skinny,
	.disable_intr = megasas_disable_intr_skinny,
	.clear_intr = megasas_clear_intr_skinny,
	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
	.adp_reset = megasas_adp_reset_gen2,
	.check_reset = megasas_check_reset_skinny,
	.service_isr = megasas_isr,
	.tasklet = megasas_complete_cmd_dpc,
	.init_adapter = megasas_init_adapter_mfi,
	.build_and_issue_cmd = megasas_build_and_issue_cmd,
	.issue_dcmd = megasas_issue_dcmd,
};


/*
 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
 * controllers
 */

/**
 * megasas_enable_intr_gen2 - Enables interrupts
 * @instance: Adapter soft state
 */
static inline void
megasas_enable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;

	regs = instance->reg_set;
	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);

	/* write ~0x00000005 (4 & 1) to the intr mask*/
	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_disable_intr_gen2 - Disables interrupt
 * @instance: Adapter soft state
 */
static inline void
megasas_disable_intr_gen2(struct megasas_instance *instance)
{
	struct megasas_register_set __iomem *regs;
	u32 mask = 0xFFFFFFFF;

	regs = instance->reg_set;
	writel(mask, &regs->outbound_intr_mask);
	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_mask);
}

/**
 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
 * @instance: Adapter soft state
 */
static u32
megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
{
	return readl(&instance->reg_set->outbound_scratch_pad_0);
}

/**
 * megasas_clear_interrupt_gen2 - Check & clear interrupt
 * @instance: Adapter soft state
 */
static int
megasas_clear_intr_gen2(struct megasas_instance *instance)
{
	u32 status;
	u32 mfiStatus = 0;
	struct megasas_register_set __iomem *regs;
	regs = instance->reg_set;

	/*
	 * Check if it is our interrupt
	 */
	status = readl(&regs->outbound_intr_status);

	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
	}
	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
	}

	/*
	 * Clear the interrupt by writing back the same value
	 */
	if (mfiStatus)
		writel(status, &regs->outbound_doorbell_clear);

	/* Dummy readl to force pci flush */
	readl(&regs->outbound_intr_status);

	return mfiStatus;
}

/**
 * megasas_fire_cmd_gen2 - Sends command to the FW
 * @instance: Adapter soft state
 * @frame_phys_addr: Physical address of cmd
 * @frame_count: Number of frames for the command
 * @regs: MFI register set
 */
static inline void
megasas_fire_cmd_gen2(struct megasas_instance *instance,
		      dma_addr_t frame_phys_addr,
		      u32 frame_count,
		      struct megasas_register_set __iomem *regs)
{
	unsigned long flags;

	spin_lock_irqsave(&instance->hba_lock, flags);
	writel((frame_phys_addr | (frame_count<<1))|1,
	       &(regs)->inbound_queue_port);
	spin_unlock_irqrestore(&instance->hba_lock, flags);
}

/**
 * megasas_adp_reset_gen2 - For controller reset
 * @instance: Adapter soft state
 * @reg_set: MFI register set
 */
static int
megasas_adp_reset_gen2(struct megasas_instance *instance,
		       struct megasas_register_set __iomem *reg_set)
{
	u32 retry = 0;
	u32 HostDiag;
	u32 __iomem *seq_offset = &reg_set->seq_offset;
	u32 __iomem *hostdiag_offset = &reg_set->host_diag;

	if (instance->instancet == &megasas_instance_template_skinny) {
		seq_offset = &reg_set->fusion_seq_offset;
		hostdiag_offset = &reg_set->fusion_host_diag;
	}

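	/*
	 * Write the diag-unlock key sequence before touching the host diag
	 * register; DIAG_WRITE_ENABLE is only granted after the controller
	 * has seen the complete sequence.
	 */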
1016*4882a593Smuzhiyun writel(0, seq_offset);
1017*4882a593Smuzhiyun writel(4, seq_offset);
1018*4882a593Smuzhiyun writel(0xb, seq_offset);
1019*4882a593Smuzhiyun writel(2, seq_offset);
1020*4882a593Smuzhiyun writel(7, seq_offset);
1021*4882a593Smuzhiyun writel(0xd, seq_offset);
1022*4882a593Smuzhiyun
1023*4882a593Smuzhiyun msleep(1000);
1024*4882a593Smuzhiyun
1025*4882a593Smuzhiyun HostDiag = (u32)readl(hostdiag_offset);
1026*4882a593Smuzhiyun
1027*4882a593Smuzhiyun while (!(HostDiag & DIAG_WRITE_ENABLE)) {
1028*4882a593Smuzhiyun msleep(100);
1029*4882a593Smuzhiyun HostDiag = (u32)readl(hostdiag_offset);
1030*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
1031*4882a593Smuzhiyun retry, HostDiag);
1032*4882a593Smuzhiyun
1033*4882a593Smuzhiyun if (retry++ >= 100)
1034*4882a593Smuzhiyun return 1;
1035*4882a593Smuzhiyun
1036*4882a593Smuzhiyun }
1037*4882a593Smuzhiyun
1038*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
1039*4882a593Smuzhiyun
1040*4882a593Smuzhiyun writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
1041*4882a593Smuzhiyun
1042*4882a593Smuzhiyun ssleep(10);
1043*4882a593Smuzhiyun
1044*4882a593Smuzhiyun HostDiag = (u32)readl(hostdiag_offset);
1045*4882a593Smuzhiyun while (HostDiag & DIAG_RESET_ADAPTER) {
1046*4882a593Smuzhiyun msleep(100);
1047*4882a593Smuzhiyun HostDiag = (u32)readl(hostdiag_offset);
1048*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
1049*4882a593Smuzhiyun retry, HostDiag);
1050*4882a593Smuzhiyun
1051*4882a593Smuzhiyun if (retry++ >= 1000)
1052*4882a593Smuzhiyun return 1;
1053*4882a593Smuzhiyun
1054*4882a593Smuzhiyun }
1055*4882a593Smuzhiyun return 0;
1056*4882a593Smuzhiyun }
1057*4882a593Smuzhiyun
1058*4882a593Smuzhiyun /**
1059*4882a593Smuzhiyun * megasas_check_reset_gen2 - For controller reset check
1060*4882a593Smuzhiyun * @instance: Adapter soft state
1061*4882a593Smuzhiyun * @regs: MFI register set
1062*4882a593Smuzhiyun */
1063*4882a593Smuzhiyun static int
megasas_check_reset_gen2(struct megasas_instance * instance,struct megasas_register_set __iomem * regs)1064*4882a593Smuzhiyun megasas_check_reset_gen2(struct megasas_instance *instance,
1065*4882a593Smuzhiyun struct megasas_register_set __iomem *regs)
1066*4882a593Smuzhiyun {
1067*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1068*4882a593Smuzhiyun return 1;
1069*4882a593Smuzhiyun
1070*4882a593Smuzhiyun return 0;
1071*4882a593Smuzhiyun }
1072*4882a593Smuzhiyun
1073*4882a593Smuzhiyun static struct megasas_instance_template megasas_instance_template_gen2 = {
1074*4882a593Smuzhiyun
1075*4882a593Smuzhiyun .fire_cmd = megasas_fire_cmd_gen2,
1076*4882a593Smuzhiyun .enable_intr = megasas_enable_intr_gen2,
1077*4882a593Smuzhiyun .disable_intr = megasas_disable_intr_gen2,
1078*4882a593Smuzhiyun .clear_intr = megasas_clear_intr_gen2,
1079*4882a593Smuzhiyun .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
1080*4882a593Smuzhiyun .adp_reset = megasas_adp_reset_gen2,
1081*4882a593Smuzhiyun .check_reset = megasas_check_reset_gen2,
1082*4882a593Smuzhiyun .service_isr = megasas_isr,
1083*4882a593Smuzhiyun .tasklet = megasas_complete_cmd_dpc,
1084*4882a593Smuzhiyun .init_adapter = megasas_init_adapter_mfi,
1085*4882a593Smuzhiyun .build_and_issue_cmd = megasas_build_and_issue_cmd,
1086*4882a593Smuzhiyun .issue_dcmd = megasas_issue_dcmd,
1087*4882a593Smuzhiyun };
1088*4882a593Smuzhiyun
1089*4882a593Smuzhiyun /*
1090*4882a593Smuzhiyun * This is the end of set of functions & definitions
1091*4882a593Smuzhiyun * specific to gen2 (deviceid : 0x78, 0x79) controllers
1092*4882a593Smuzhiyun */
1093*4882a593Smuzhiyun
1094*4882a593Smuzhiyun /*
1095*4882a593Smuzhiyun * Template added for TB (Fusion)
1096*4882a593Smuzhiyun */
1097*4882a593Smuzhiyun extern struct megasas_instance_template megasas_instance_template_fusion;
1098*4882a593Smuzhiyun
1099*4882a593Smuzhiyun /**
1100*4882a593Smuzhiyun * megasas_issue_polled - Issues a polling command
1101*4882a593Smuzhiyun * @instance: Adapter soft state
1102*4882a593Smuzhiyun * @cmd: Command packet to be issued
1103*4882a593Smuzhiyun *
1104*4882a593Smuzhiyun * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
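 *
 * Typical use (illustrative sketch): the caller obtains a command via
 * megasas_get_cmd(), fills in a DCMD frame, calls this function, and
 * compares the return value against DCMD_SUCCESS.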
1105*4882a593Smuzhiyun */
1106*4882a593Smuzhiyun int
1107*4882a593Smuzhiyun megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
1108*4882a593Smuzhiyun {
1109*4882a593Smuzhiyun struct megasas_header *frame_hdr = &cmd->frame->hdr;
1110*4882a593Smuzhiyun
1111*4882a593Smuzhiyun frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1112*4882a593Smuzhiyun frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1113*4882a593Smuzhiyun
1114*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1115*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1116*4882a593Smuzhiyun __func__, __LINE__);
1117*4882a593Smuzhiyun return DCMD_INIT;
1118*4882a593Smuzhiyun }
1119*4882a593Smuzhiyun
1120*4882a593Smuzhiyun instance->instancet->issue_dcmd(instance, cmd);
1121*4882a593Smuzhiyun
1122*4882a593Smuzhiyun return wait_and_poll(instance, cmd, instance->requestorId ?
1123*4882a593Smuzhiyun MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1124*4882a593Smuzhiyun }
1125*4882a593Smuzhiyun
1126*4882a593Smuzhiyun /**
1127*4882a593Smuzhiyun * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
1128*4882a593Smuzhiyun * @instance: Adapter soft state
1129*4882a593Smuzhiyun * @cmd: Command to be issued
1130*4882a593Smuzhiyun * @timeout: Timeout in seconds
1131*4882a593Smuzhiyun *
1132*4882a593Smuzhiyun * This function waits on an event for the command to be returned from ISR.
1133*4882a593Smuzhiyun * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs.
1134*4882a593Smuzhiyun * Used to issue ioctl commands.
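 *
 * Return: the driver command status set by the completion path (for example
 * DCMD_SUCCESS), DCMD_TIMEOUT if the wait times out, or DCMD_INIT when the
 * adapter is already in a critical error state.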
1135*4882a593Smuzhiyun */
1136*4882a593Smuzhiyun int
1137*4882a593Smuzhiyun megasas_issue_blocked_cmd(struct megasas_instance *instance,
1138*4882a593Smuzhiyun struct megasas_cmd *cmd, int timeout)
1139*4882a593Smuzhiyun {
1140*4882a593Smuzhiyun int ret = 0;
1141*4882a593Smuzhiyun cmd->cmd_status_drv = DCMD_INIT;
1142*4882a593Smuzhiyun
1143*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1144*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1145*4882a593Smuzhiyun __func__, __LINE__);
1146*4882a593Smuzhiyun return DCMD_INIT;
1147*4882a593Smuzhiyun }
1148*4882a593Smuzhiyun
1149*4882a593Smuzhiyun instance->instancet->issue_dcmd(instance, cmd);
1150*4882a593Smuzhiyun
1151*4882a593Smuzhiyun if (timeout) {
1152*4882a593Smuzhiyun ret = wait_event_timeout(instance->int_cmd_wait_q,
1153*4882a593Smuzhiyun cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1154*4882a593Smuzhiyun if (!ret) {
1155*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
1156*4882a593Smuzhiyun "DCMD(opcode: 0x%x) is timed out, func:%s\n",
1157*4882a593Smuzhiyun cmd->frame->dcmd.opcode, __func__);
1158*4882a593Smuzhiyun return DCMD_TIMEOUT;
1159*4882a593Smuzhiyun }
1160*4882a593Smuzhiyun } else
1161*4882a593Smuzhiyun wait_event(instance->int_cmd_wait_q,
1162*4882a593Smuzhiyun cmd->cmd_status_drv != DCMD_INIT);
1163*4882a593Smuzhiyun
1164*4882a593Smuzhiyun return cmd->cmd_status_drv;
1165*4882a593Smuzhiyun }
1166*4882a593Smuzhiyun
1167*4882a593Smuzhiyun /**
1168*4882a593Smuzhiyun * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
1169*4882a593Smuzhiyun * @instance: Adapter soft state
1170*4882a593Smuzhiyun * @cmd_to_abort: Previously issued cmd to be aborted
1171*4882a593Smuzhiyun * @timeout: Timeout in seconds
1172*4882a593Smuzhiyun *
1173*4882a593Smuzhiyun * MFI firmware can abort a previously issued AEN command (automatic event
1174*4882a593Smuzhiyun * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1175*4882a593Smuzhiyun * cmd and waits for return status.
1176*4882a593Smuzhiyun * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs.
1177*4882a593Smuzhiyun */
1178*4882a593Smuzhiyun static int
1179*4882a593Smuzhiyun megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1180*4882a593Smuzhiyun struct megasas_cmd *cmd_to_abort, int timeout)
1181*4882a593Smuzhiyun {
1182*4882a593Smuzhiyun struct megasas_cmd *cmd;
1183*4882a593Smuzhiyun struct megasas_abort_frame *abort_fr;
1184*4882a593Smuzhiyun int ret = 0;
1185*4882a593Smuzhiyun u32 opcode;
1186*4882a593Smuzhiyun
1187*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun if (!cmd)
1190*4882a593Smuzhiyun return -1;
1191*4882a593Smuzhiyun
1192*4882a593Smuzhiyun abort_fr = &cmd->frame->abort;
1193*4882a593Smuzhiyun
1194*4882a593Smuzhiyun /*
1195*4882a593Smuzhiyun * Prepare and issue the abort frame
1196*4882a593Smuzhiyun */
1197*4882a593Smuzhiyun abort_fr->cmd = MFI_CMD_ABORT;
1198*4882a593Smuzhiyun abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1199*4882a593Smuzhiyun abort_fr->flags = cpu_to_le16(0);
1200*4882a593Smuzhiyun abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1201*4882a593Smuzhiyun abort_fr->abort_mfi_phys_addr_lo =
1202*4882a593Smuzhiyun cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1203*4882a593Smuzhiyun abort_fr->abort_mfi_phys_addr_hi =
1204*4882a593Smuzhiyun cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1205*4882a593Smuzhiyun
1206*4882a593Smuzhiyun cmd->sync_cmd = 1;
1207*4882a593Smuzhiyun cmd->cmd_status_drv = DCMD_INIT;
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1210*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1211*4882a593Smuzhiyun __func__, __LINE__);
1212*4882a593Smuzhiyun return DCMD_INIT;
1213*4882a593Smuzhiyun }
1214*4882a593Smuzhiyun
1215*4882a593Smuzhiyun instance->instancet->issue_dcmd(instance, cmd);
1216*4882a593Smuzhiyun
1217*4882a593Smuzhiyun if (timeout) {
1218*4882a593Smuzhiyun ret = wait_event_timeout(instance->abort_cmd_wait_q,
1219*4882a593Smuzhiyun cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1220*4882a593Smuzhiyun if (!ret) {
1221*4882a593Smuzhiyun opcode = cmd_to_abort->frame->dcmd.opcode;
1222*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
1223*4882a593Smuzhiyun "Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
1224*4882a593Smuzhiyun opcode, __func__);
1225*4882a593Smuzhiyun return DCMD_TIMEOUT;
1226*4882a593Smuzhiyun }
1227*4882a593Smuzhiyun } else
1228*4882a593Smuzhiyun wait_event(instance->abort_cmd_wait_q,
1229*4882a593Smuzhiyun cmd->cmd_status_drv != DCMD_INIT);
1230*4882a593Smuzhiyun
1231*4882a593Smuzhiyun cmd->sync_cmd = 0;
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
1234*4882a593Smuzhiyun return cmd->cmd_status_drv;
1235*4882a593Smuzhiyun }
1236*4882a593Smuzhiyun
1237*4882a593Smuzhiyun /**
1238*4882a593Smuzhiyun * megasas_make_sgl32 - Prepares 32-bit SGL
1239*4882a593Smuzhiyun * @instance: Adapter soft state
1240*4882a593Smuzhiyun * @scp: SCSI command from the mid-layer
1241*4882a593Smuzhiyun * @mfi_sgl: SGL to be filled in
1242*4882a593Smuzhiyun *
1243*4882a593Smuzhiyun * If successful, this function returns the number of SG elements. Otherwise,
1244*4882a593Smuzhiyun * it returns -1.
1245*4882a593Smuzhiyun */
1246*4882a593Smuzhiyun static int
1247*4882a593Smuzhiyun megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1248*4882a593Smuzhiyun union megasas_sgl *mfi_sgl)
1249*4882a593Smuzhiyun {
1250*4882a593Smuzhiyun int i;
1251*4882a593Smuzhiyun int sge_count;
1252*4882a593Smuzhiyun struct scatterlist *os_sgl;
1253*4882a593Smuzhiyun
1254*4882a593Smuzhiyun sge_count = scsi_dma_map(scp);
1255*4882a593Smuzhiyun BUG_ON(sge_count < 0);
1256*4882a593Smuzhiyun
1257*4882a593Smuzhiyun if (sge_count) {
1258*4882a593Smuzhiyun scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1259*4882a593Smuzhiyun mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1260*4882a593Smuzhiyun mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1261*4882a593Smuzhiyun }
1262*4882a593Smuzhiyun }
1263*4882a593Smuzhiyun return sge_count;
1264*4882a593Smuzhiyun }
1265*4882a593Smuzhiyun
1266*4882a593Smuzhiyun /**
1267*4882a593Smuzhiyun * megasas_make_sgl64 - Prepares 64-bit SGL
1268*4882a593Smuzhiyun * @instance: Adapter soft state
1269*4882a593Smuzhiyun * @scp: SCSI command from the mid-layer
1270*4882a593Smuzhiyun * @mfi_sgl: SGL to be filled in
1271*4882a593Smuzhiyun *
1272*4882a593Smuzhiyun * If successful, this function returns the number of SG elements. Otherwise,
1273*4882a593Smuzhiyun * it returns -1.
1274*4882a593Smuzhiyun */
1275*4882a593Smuzhiyun static int
1276*4882a593Smuzhiyun megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1277*4882a593Smuzhiyun union megasas_sgl *mfi_sgl)
1278*4882a593Smuzhiyun {
1279*4882a593Smuzhiyun int i;
1280*4882a593Smuzhiyun int sge_count;
1281*4882a593Smuzhiyun struct scatterlist *os_sgl;
1282*4882a593Smuzhiyun
1283*4882a593Smuzhiyun sge_count = scsi_dma_map(scp);
1284*4882a593Smuzhiyun BUG_ON(sge_count < 0);
1285*4882a593Smuzhiyun
1286*4882a593Smuzhiyun if (sge_count) {
1287*4882a593Smuzhiyun scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1288*4882a593Smuzhiyun mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1289*4882a593Smuzhiyun mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1290*4882a593Smuzhiyun }
1291*4882a593Smuzhiyun }
1292*4882a593Smuzhiyun return sge_count;
1293*4882a593Smuzhiyun }
1294*4882a593Smuzhiyun
1295*4882a593Smuzhiyun /**
1296*4882a593Smuzhiyun * megasas_make_sgl_skinny - Prepares IEEE SGL
1297*4882a593Smuzhiyun * @instance: Adapter soft state
1298*4882a593Smuzhiyun * @scp: SCSI command from the mid-layer
1299*4882a593Smuzhiyun * @mfi_sgl: SGL to be filled in
1300*4882a593Smuzhiyun *
1301*4882a593Smuzhiyun * If successful, this function returns the number of SG elements. Otherwise,
1302*4882a593Smuzhiyun * it returns -1.
1303*4882a593Smuzhiyun */
1304*4882a593Smuzhiyun static int
1305*4882a593Smuzhiyun megasas_make_sgl_skinny(struct megasas_instance *instance,
1306*4882a593Smuzhiyun struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1307*4882a593Smuzhiyun {
1308*4882a593Smuzhiyun int i;
1309*4882a593Smuzhiyun int sge_count;
1310*4882a593Smuzhiyun struct scatterlist *os_sgl;
1311*4882a593Smuzhiyun
1312*4882a593Smuzhiyun sge_count = scsi_dma_map(scp);
1313*4882a593Smuzhiyun
1314*4882a593Smuzhiyun if (sge_count) {
1315*4882a593Smuzhiyun scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1316*4882a593Smuzhiyun mfi_sgl->sge_skinny[i].length =
1317*4882a593Smuzhiyun cpu_to_le32(sg_dma_len(os_sgl));
1318*4882a593Smuzhiyun mfi_sgl->sge_skinny[i].phys_addr =
1319*4882a593Smuzhiyun cpu_to_le64(sg_dma_address(os_sgl));
1320*4882a593Smuzhiyun mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1321*4882a593Smuzhiyun }
1322*4882a593Smuzhiyun }
1323*4882a593Smuzhiyun return sge_count;
1324*4882a593Smuzhiyun }
1325*4882a593Smuzhiyun
1326*4882a593Smuzhiyun /**
1327*4882a593Smuzhiyun * megasas_get_frame_count - Computes the number of frames
1328*4882a593Smuzhiyun * @frame_type : type of frame- io or pthru frame
1329*4882a593Smuzhiyun * @sge_count : number of sg elements
1330*4882a593Smuzhiyun *
1331*4882a593Smuzhiyun * Returns the number of frames required for the given number of SG elements (sge_count)
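 *
 * Worked example (sizes are illustrative): with 16-byte IEEE/skinny SGEs and
 * 64-byte MFI frames, an IO frame carrying 9 SGEs keeps 1 SGE in the main
 * frame; the remaining 8 SGEs need 8 * 16 = 128 bytes = 2 extra frames, so
 * the command consumes 3 frames in total.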
1332*4882a593Smuzhiyun */
1333*4882a593Smuzhiyun
1334*4882a593Smuzhiyun static u32 megasas_get_frame_count(struct megasas_instance *instance,
1335*4882a593Smuzhiyun u8 sge_count, u8 frame_type)
1336*4882a593Smuzhiyun {
1337*4882a593Smuzhiyun int num_cnt;
1338*4882a593Smuzhiyun int sge_bytes;
1339*4882a593Smuzhiyun u32 sge_sz;
1340*4882a593Smuzhiyun u32 frame_count = 0;
1341*4882a593Smuzhiyun
1342*4882a593Smuzhiyun sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1343*4882a593Smuzhiyun sizeof(struct megasas_sge32);
1344*4882a593Smuzhiyun
1345*4882a593Smuzhiyun if (instance->flag_ieee) {
1346*4882a593Smuzhiyun sge_sz = sizeof(struct megasas_sge_skinny);
1347*4882a593Smuzhiyun }
1348*4882a593Smuzhiyun
1349*4882a593Smuzhiyun /*
1350*4882a593Smuzhiyun * Main frame can contain 2 SGEs for 64-bit SGLs and
1351*4882a593Smuzhiyun * 3 SGEs for 32-bit SGLs for ldio &
1352*4882a593Smuzhiyun * 1 SGEs for 64-bit SGLs and
1353*4882a593Smuzhiyun * 2 SGEs for 32-bit SGLs for pthru frame
1354*4882a593Smuzhiyun */
1355*4882a593Smuzhiyun if (unlikely(frame_type == PTHRU_FRAME)) {
1356*4882a593Smuzhiyun if (instance->flag_ieee == 1) {
1357*4882a593Smuzhiyun num_cnt = sge_count - 1;
1358*4882a593Smuzhiyun } else if (IS_DMA64)
1359*4882a593Smuzhiyun num_cnt = sge_count - 1;
1360*4882a593Smuzhiyun else
1361*4882a593Smuzhiyun num_cnt = sge_count - 2;
1362*4882a593Smuzhiyun } else {
1363*4882a593Smuzhiyun if (instance->flag_ieee == 1) {
1364*4882a593Smuzhiyun num_cnt = sge_count - 1;
1365*4882a593Smuzhiyun } else if (IS_DMA64)
1366*4882a593Smuzhiyun num_cnt = sge_count - 2;
1367*4882a593Smuzhiyun else
1368*4882a593Smuzhiyun num_cnt = sge_count - 3;
1369*4882a593Smuzhiyun }
1370*4882a593Smuzhiyun
1371*4882a593Smuzhiyun if (num_cnt > 0) {
1372*4882a593Smuzhiyun sge_bytes = sge_sz * num_cnt;
1373*4882a593Smuzhiyun
1374*4882a593Smuzhiyun frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1375*4882a593Smuzhiyun ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1376*4882a593Smuzhiyun }
1377*4882a593Smuzhiyun /* Main frame */
1378*4882a593Smuzhiyun frame_count += 1;
1379*4882a593Smuzhiyun
1380*4882a593Smuzhiyun if (frame_count > 7)
1381*4882a593Smuzhiyun frame_count = 8;
1382*4882a593Smuzhiyun return frame_count;
1383*4882a593Smuzhiyun }
1384*4882a593Smuzhiyun
1385*4882a593Smuzhiyun /**
1386*4882a593Smuzhiyun * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
1387*4882a593Smuzhiyun * @instance: Adapter soft state
1388*4882a593Smuzhiyun * @scp: SCSI command
1389*4882a593Smuzhiyun * @cmd: Command to be prepared in
1390*4882a593Smuzhiyun *
1391*4882a593Smuzhiyun * This function prepares CDB commands. These are typically pass-through
1392*4882a593Smuzhiyun * commands to the devices.
1393*4882a593Smuzhiyun */
1394*4882a593Smuzhiyun static int
1395*4882a593Smuzhiyun megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1396*4882a593Smuzhiyun struct megasas_cmd *cmd)
1397*4882a593Smuzhiyun {
1398*4882a593Smuzhiyun u32 is_logical;
1399*4882a593Smuzhiyun u32 device_id;
1400*4882a593Smuzhiyun u16 flags = 0;
1401*4882a593Smuzhiyun struct megasas_pthru_frame *pthru;
1402*4882a593Smuzhiyun
1403*4882a593Smuzhiyun is_logical = MEGASAS_IS_LOGICAL(scp->device);
1404*4882a593Smuzhiyun device_id = MEGASAS_DEV_INDEX(scp);
1405*4882a593Smuzhiyun pthru = (struct megasas_pthru_frame *)cmd->frame;
1406*4882a593Smuzhiyun
1407*4882a593Smuzhiyun if (scp->sc_data_direction == DMA_TO_DEVICE)
1408*4882a593Smuzhiyun flags = MFI_FRAME_DIR_WRITE;
1409*4882a593Smuzhiyun else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1410*4882a593Smuzhiyun flags = MFI_FRAME_DIR_READ;
1411*4882a593Smuzhiyun else if (scp->sc_data_direction == DMA_NONE)
1412*4882a593Smuzhiyun flags = MFI_FRAME_DIR_NONE;
1413*4882a593Smuzhiyun
1414*4882a593Smuzhiyun if (instance->flag_ieee == 1) {
1415*4882a593Smuzhiyun flags |= MFI_FRAME_IEEE;
1416*4882a593Smuzhiyun }
1417*4882a593Smuzhiyun
1418*4882a593Smuzhiyun /*
1419*4882a593Smuzhiyun * Prepare the DCDB frame
1420*4882a593Smuzhiyun */
1421*4882a593Smuzhiyun pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1422*4882a593Smuzhiyun pthru->cmd_status = 0x0;
1423*4882a593Smuzhiyun pthru->scsi_status = 0x0;
1424*4882a593Smuzhiyun pthru->target_id = device_id;
1425*4882a593Smuzhiyun pthru->lun = scp->device->lun;
1426*4882a593Smuzhiyun pthru->cdb_len = scp->cmd_len;
1427*4882a593Smuzhiyun pthru->timeout = 0;
1428*4882a593Smuzhiyun pthru->pad_0 = 0;
1429*4882a593Smuzhiyun pthru->flags = cpu_to_le16(flags);
1430*4882a593Smuzhiyun pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1431*4882a593Smuzhiyun
1432*4882a593Smuzhiyun memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1433*4882a593Smuzhiyun
1434*4882a593Smuzhiyun /*
1435*4882a593Smuzhiyun * If the command is for the tape device, set the
1436*4882a593Smuzhiyun * pthru timeout to the os layer timeout value.
1437*4882a593Smuzhiyun */
1438*4882a593Smuzhiyun if (scp->device->type == TYPE_TAPE) {
1439*4882a593Smuzhiyun if ((scp->request->timeout / HZ) > 0xFFFF)
1440*4882a593Smuzhiyun pthru->timeout = cpu_to_le16(0xFFFF);
1441*4882a593Smuzhiyun else
1442*4882a593Smuzhiyun pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1443*4882a593Smuzhiyun }
1444*4882a593Smuzhiyun
1445*4882a593Smuzhiyun /*
1446*4882a593Smuzhiyun * Construct SGL
1447*4882a593Smuzhiyun */
1448*4882a593Smuzhiyun if (instance->flag_ieee == 1) {
1449*4882a593Smuzhiyun pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1450*4882a593Smuzhiyun pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1451*4882a593Smuzhiyun &pthru->sgl);
1452*4882a593Smuzhiyun } else if (IS_DMA64) {
1453*4882a593Smuzhiyun pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1454*4882a593Smuzhiyun pthru->sge_count = megasas_make_sgl64(instance, scp,
1455*4882a593Smuzhiyun &pthru->sgl);
1456*4882a593Smuzhiyun } else
1457*4882a593Smuzhiyun pthru->sge_count = megasas_make_sgl32(instance, scp,
1458*4882a593Smuzhiyun &pthru->sgl);
1459*4882a593Smuzhiyun
1460*4882a593Smuzhiyun if (pthru->sge_count > instance->max_num_sge) {
1461*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1462*4882a593Smuzhiyun pthru->sge_count);
1463*4882a593Smuzhiyun return 0;
1464*4882a593Smuzhiyun }
1465*4882a593Smuzhiyun
1466*4882a593Smuzhiyun /*
1467*4882a593Smuzhiyun * Sense info specific
1468*4882a593Smuzhiyun */
1469*4882a593Smuzhiyun pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1470*4882a593Smuzhiyun pthru->sense_buf_phys_addr_hi =
1471*4882a593Smuzhiyun cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1472*4882a593Smuzhiyun pthru->sense_buf_phys_addr_lo =
1473*4882a593Smuzhiyun cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1474*4882a593Smuzhiyun
1475*4882a593Smuzhiyun /*
1476*4882a593Smuzhiyun * Compute the total number of frames this command consumes. FW uses
1477*4882a593Smuzhiyun * this number to pull sufficient number of frames from host memory.
1478*4882a593Smuzhiyun */
1479*4882a593Smuzhiyun cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1480*4882a593Smuzhiyun PTHRU_FRAME);
1481*4882a593Smuzhiyun
1482*4882a593Smuzhiyun return cmd->frame_count;
1483*4882a593Smuzhiyun }
1484*4882a593Smuzhiyun
1485*4882a593Smuzhiyun /**
1486*4882a593Smuzhiyun * megasas_build_ldio - Prepares IOs to logical devices
1487*4882a593Smuzhiyun * @instance: Adapter soft state
1488*4882a593Smuzhiyun * @scp: SCSI command
1489*4882a593Smuzhiyun * @cmd: Command to be prepared
1490*4882a593Smuzhiyun *
1491*4882a593Smuzhiyun * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1492*4882a593Smuzhiyun */
1493*4882a593Smuzhiyun static int
1494*4882a593Smuzhiyun megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1495*4882a593Smuzhiyun struct megasas_cmd *cmd)
1496*4882a593Smuzhiyun {
1497*4882a593Smuzhiyun u32 device_id;
1498*4882a593Smuzhiyun u8 sc = scp->cmnd[0];
1499*4882a593Smuzhiyun u16 flags = 0;
1500*4882a593Smuzhiyun struct megasas_io_frame *ldio;
1501*4882a593Smuzhiyun
1502*4882a593Smuzhiyun device_id = MEGASAS_DEV_INDEX(scp);
1503*4882a593Smuzhiyun ldio = (struct megasas_io_frame *)cmd->frame;
1504*4882a593Smuzhiyun
1505*4882a593Smuzhiyun if (scp->sc_data_direction == DMA_TO_DEVICE)
1506*4882a593Smuzhiyun flags = MFI_FRAME_DIR_WRITE;
1507*4882a593Smuzhiyun else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1508*4882a593Smuzhiyun flags = MFI_FRAME_DIR_READ;
1509*4882a593Smuzhiyun
1510*4882a593Smuzhiyun if (instance->flag_ieee == 1) {
1511*4882a593Smuzhiyun flags |= MFI_FRAME_IEEE;
1512*4882a593Smuzhiyun }
1513*4882a593Smuzhiyun
1514*4882a593Smuzhiyun /*
1515*4882a593Smuzhiyun * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1516*4882a593Smuzhiyun */
1517*4882a593Smuzhiyun ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1518*4882a593Smuzhiyun ldio->cmd_status = 0x0;
1519*4882a593Smuzhiyun ldio->scsi_status = 0x0;
1520*4882a593Smuzhiyun ldio->target_id = device_id;
1521*4882a593Smuzhiyun ldio->timeout = 0;
1522*4882a593Smuzhiyun ldio->reserved_0 = 0;
1523*4882a593Smuzhiyun ldio->pad_0 = 0;
1524*4882a593Smuzhiyun ldio->flags = cpu_to_le16(flags);
1525*4882a593Smuzhiyun ldio->start_lba_hi = 0;
1526*4882a593Smuzhiyun ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1527*4882a593Smuzhiyun
1528*4882a593Smuzhiyun /*
1529*4882a593Smuzhiyun * 6-byte READ(0x08) or WRITE(0x0A) cdb
1530*4882a593Smuzhiyun */
1531*4882a593Smuzhiyun if (scp->cmd_len == 6) {
1532*4882a593Smuzhiyun ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1533*4882a593Smuzhiyun ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1534*4882a593Smuzhiyun ((u32) scp->cmnd[2] << 8) |
1535*4882a593Smuzhiyun (u32) scp->cmnd[3]);
1536*4882a593Smuzhiyun
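		/* A 6-byte CDB carries only a 21-bit LBA; mask off the high bits. */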
1537*4882a593Smuzhiyun ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1538*4882a593Smuzhiyun }
1539*4882a593Smuzhiyun
1540*4882a593Smuzhiyun /*
1541*4882a593Smuzhiyun * 10-byte READ(0x28) or WRITE(0x2A) cdb
1542*4882a593Smuzhiyun */
1543*4882a593Smuzhiyun else if (scp->cmd_len == 10) {
1544*4882a593Smuzhiyun ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1545*4882a593Smuzhiyun ((u32) scp->cmnd[7] << 8));
1546*4882a593Smuzhiyun ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1547*4882a593Smuzhiyun ((u32) scp->cmnd[3] << 16) |
1548*4882a593Smuzhiyun ((u32) scp->cmnd[4] << 8) |
1549*4882a593Smuzhiyun (u32) scp->cmnd[5]);
1550*4882a593Smuzhiyun }
1551*4882a593Smuzhiyun
1552*4882a593Smuzhiyun /*
1553*4882a593Smuzhiyun * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1554*4882a593Smuzhiyun */
1555*4882a593Smuzhiyun else if (scp->cmd_len == 12) {
1556*4882a593Smuzhiyun ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1557*4882a593Smuzhiyun ((u32) scp->cmnd[7] << 16) |
1558*4882a593Smuzhiyun ((u32) scp->cmnd[8] << 8) |
1559*4882a593Smuzhiyun (u32) scp->cmnd[9]);
1560*4882a593Smuzhiyun
1561*4882a593Smuzhiyun ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1562*4882a593Smuzhiyun ((u32) scp->cmnd[3] << 16) |
1563*4882a593Smuzhiyun ((u32) scp->cmnd[4] << 8) |
1564*4882a593Smuzhiyun (u32) scp->cmnd[5]);
1565*4882a593Smuzhiyun }
1566*4882a593Smuzhiyun
1567*4882a593Smuzhiyun /*
1568*4882a593Smuzhiyun * 16-byte READ(0x88) or WRITE(0x8A) cdb
1569*4882a593Smuzhiyun */
1570*4882a593Smuzhiyun else if (scp->cmd_len == 16) {
1571*4882a593Smuzhiyun ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1572*4882a593Smuzhiyun ((u32) scp->cmnd[11] << 16) |
1573*4882a593Smuzhiyun ((u32) scp->cmnd[12] << 8) |
1574*4882a593Smuzhiyun (u32) scp->cmnd[13]);
1575*4882a593Smuzhiyun
1576*4882a593Smuzhiyun ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1577*4882a593Smuzhiyun ((u32) scp->cmnd[7] << 16) |
1578*4882a593Smuzhiyun ((u32) scp->cmnd[8] << 8) |
1579*4882a593Smuzhiyun (u32) scp->cmnd[9]);
1580*4882a593Smuzhiyun
1581*4882a593Smuzhiyun ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1582*4882a593Smuzhiyun ((u32) scp->cmnd[3] << 16) |
1583*4882a593Smuzhiyun ((u32) scp->cmnd[4] << 8) |
1584*4882a593Smuzhiyun (u32) scp->cmnd[5]);
1585*4882a593Smuzhiyun
1586*4882a593Smuzhiyun }
1587*4882a593Smuzhiyun
1588*4882a593Smuzhiyun /*
1589*4882a593Smuzhiyun * Construct SGL
1590*4882a593Smuzhiyun */
1591*4882a593Smuzhiyun if (instance->flag_ieee) {
1592*4882a593Smuzhiyun ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1593*4882a593Smuzhiyun ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1594*4882a593Smuzhiyun &ldio->sgl);
1595*4882a593Smuzhiyun } else if (IS_DMA64) {
1596*4882a593Smuzhiyun ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1597*4882a593Smuzhiyun ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1598*4882a593Smuzhiyun } else
1599*4882a593Smuzhiyun ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1600*4882a593Smuzhiyun
1601*4882a593Smuzhiyun if (ldio->sge_count > instance->max_num_sge) {
1602*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1603*4882a593Smuzhiyun ldio->sge_count);
1604*4882a593Smuzhiyun return 0;
1605*4882a593Smuzhiyun }
1606*4882a593Smuzhiyun
1607*4882a593Smuzhiyun /*
1608*4882a593Smuzhiyun * Sense info specific
1609*4882a593Smuzhiyun */
1610*4882a593Smuzhiyun ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1611*4882a593Smuzhiyun ldio->sense_buf_phys_addr_hi = 0;
1612*4882a593Smuzhiyun ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1613*4882a593Smuzhiyun
1614*4882a593Smuzhiyun /*
1615*4882a593Smuzhiyun * Compute the total number of frames this command consumes. FW uses
1616*4882a593Smuzhiyun * this number to pull sufficient number of frames from host memory.
1617*4882a593Smuzhiyun */
1618*4882a593Smuzhiyun cmd->frame_count = megasas_get_frame_count(instance,
1619*4882a593Smuzhiyun ldio->sge_count, IO_FRAME);
1620*4882a593Smuzhiyun
1621*4882a593Smuzhiyun return cmd->frame_count;
1622*4882a593Smuzhiyun }
1623*4882a593Smuzhiyun
1624*4882a593Smuzhiyun /**
1625*4882a593Smuzhiyun * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD
1626*4882a593Smuzhiyun * and whether it's RW or non-RW
1627*4882a593Smuzhiyun * @cmd: SCSI command
1628*4882a593Smuzhiyun *
1629*4882a593Smuzhiyun */
1630*4882a593Smuzhiyun inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1631*4882a593Smuzhiyun {
1632*4882a593Smuzhiyun int ret;
1633*4882a593Smuzhiyun
1634*4882a593Smuzhiyun switch (cmd->cmnd[0]) {
1635*4882a593Smuzhiyun case READ_10:
1636*4882a593Smuzhiyun case WRITE_10:
1637*4882a593Smuzhiyun case READ_12:
1638*4882a593Smuzhiyun case WRITE_12:
1639*4882a593Smuzhiyun case READ_6:
1640*4882a593Smuzhiyun case WRITE_6:
1641*4882a593Smuzhiyun case READ_16:
1642*4882a593Smuzhiyun case WRITE_16:
1643*4882a593Smuzhiyun ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1644*4882a593Smuzhiyun READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1645*4882a593Smuzhiyun break;
1646*4882a593Smuzhiyun default:
1647*4882a593Smuzhiyun ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1648*4882a593Smuzhiyun NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1649*4882a593Smuzhiyun }
1650*4882a593Smuzhiyun return ret;
1651*4882a593Smuzhiyun }
1652*4882a593Smuzhiyun
1653*4882a593Smuzhiyun /**
1654*4882a593Smuzhiyun * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
1655*4882a593Smuzhiyun * in FW
1656*4882a593Smuzhiyun * @instance: Adapter soft state
1657*4882a593Smuzhiyun */
1658*4882a593Smuzhiyun static inline void
1659*4882a593Smuzhiyun megasas_dump_pending_frames(struct megasas_instance *instance)
1660*4882a593Smuzhiyun {
1661*4882a593Smuzhiyun struct megasas_cmd *cmd;
1662*4882a593Smuzhiyun int i,n;
1663*4882a593Smuzhiyun union megasas_sgl *mfi_sgl;
1664*4882a593Smuzhiyun struct megasas_io_frame *ldio;
1665*4882a593Smuzhiyun struct megasas_pthru_frame *pthru;
1666*4882a593Smuzhiyun u32 sgcount;
1667*4882a593Smuzhiyun u16 max_cmd = instance->max_fw_cmds;
1668*4882a593Smuzhiyun
1669*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1670*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1671*4882a593Smuzhiyun if (IS_DMA64)
1672*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1673*4882a593Smuzhiyun else
1674*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1675*4882a593Smuzhiyun
1676*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1677*4882a593Smuzhiyun for (i = 0; i < max_cmd; i++) {
1678*4882a593Smuzhiyun cmd = instance->cmd_list[i];
1679*4882a593Smuzhiyun if (!cmd->scmd)
1680*4882a593Smuzhiyun continue;
1681*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1682*4882a593Smuzhiyun if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1683*4882a593Smuzhiyun ldio = (struct megasas_io_frame *)cmd->frame;
1684*4882a593Smuzhiyun mfi_sgl = &ldio->sgl;
1685*4882a593Smuzhiyun sgcount = ldio->sge_count;
1686*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1687*4882a593Smuzhiyun " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1688*4882a593Smuzhiyun instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1689*4882a593Smuzhiyun le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1690*4882a593Smuzhiyun le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1691*4882a593Smuzhiyun } else {
1692*4882a593Smuzhiyun pthru = (struct megasas_pthru_frame *) cmd->frame;
1693*4882a593Smuzhiyun mfi_sgl = &pthru->sgl;
1694*4882a593Smuzhiyun sgcount = pthru->sge_count;
1695*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1696*4882a593Smuzhiyun "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1697*4882a593Smuzhiyun instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1698*4882a593Smuzhiyun pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1699*4882a593Smuzhiyun le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1700*4882a593Smuzhiyun }
1701*4882a593Smuzhiyun if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1702*4882a593Smuzhiyun for (n = 0; n < sgcount; n++) {
1703*4882a593Smuzhiyun if (IS_DMA64)
1704*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1705*4882a593Smuzhiyun le32_to_cpu(mfi_sgl->sge64[n].length),
1706*4882a593Smuzhiyun le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1707*4882a593Smuzhiyun else
1708*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1709*4882a593Smuzhiyun le32_to_cpu(mfi_sgl->sge32[n].length),
1710*4882a593Smuzhiyun le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1711*4882a593Smuzhiyun }
1712*4882a593Smuzhiyun }
1713*4882a593Smuzhiyun } /*for max_cmd*/
1714*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1715*4882a593Smuzhiyun for (i = 0; i < max_cmd; i++) {
1716*4882a593Smuzhiyun
1717*4882a593Smuzhiyun cmd = instance->cmd_list[i];
1718*4882a593Smuzhiyun
1719*4882a593Smuzhiyun if (cmd->sync_cmd == 1)
1720*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1721*4882a593Smuzhiyun }
1722*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1723*4882a593Smuzhiyun }
1724*4882a593Smuzhiyun
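/**
 * megasas_build_and_issue_cmd - Build an MFI frame for a SCSI command and post it to FW
 * @instance: Adapter soft state
 * @scmd: SCSI command from the mid-layer
 *
 * Returns 0 on success, or SCSI_MLQUEUE_HOST_BUSY when no internal command is
 * available or the frame could not be built.
 */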
1725*4882a593Smuzhiyun u32
1726*4882a593Smuzhiyun megasas_build_and_issue_cmd(struct megasas_instance *instance,
1727*4882a593Smuzhiyun struct scsi_cmnd *scmd)
1728*4882a593Smuzhiyun {
1729*4882a593Smuzhiyun struct megasas_cmd *cmd;
1730*4882a593Smuzhiyun u32 frame_count;
1731*4882a593Smuzhiyun
1732*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
1733*4882a593Smuzhiyun if (!cmd)
1734*4882a593Smuzhiyun return SCSI_MLQUEUE_HOST_BUSY;
1735*4882a593Smuzhiyun
1736*4882a593Smuzhiyun /*
1737*4882a593Smuzhiyun * Logical drive command
1738*4882a593Smuzhiyun */
1739*4882a593Smuzhiyun if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1740*4882a593Smuzhiyun frame_count = megasas_build_ldio(instance, scmd, cmd);
1741*4882a593Smuzhiyun else
1742*4882a593Smuzhiyun frame_count = megasas_build_dcdb(instance, scmd, cmd);
1743*4882a593Smuzhiyun
1744*4882a593Smuzhiyun if (!frame_count)
1745*4882a593Smuzhiyun goto out_return_cmd;
1746*4882a593Smuzhiyun
1747*4882a593Smuzhiyun cmd->scmd = scmd;
1748*4882a593Smuzhiyun scmd->SCp.ptr = (char *)cmd;
1749*4882a593Smuzhiyun
1750*4882a593Smuzhiyun /*
1751*4882a593Smuzhiyun * Issue the command to the FW
1752*4882a593Smuzhiyun */
1753*4882a593Smuzhiyun atomic_inc(&instance->fw_outstanding);
1754*4882a593Smuzhiyun
1755*4882a593Smuzhiyun instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1756*4882a593Smuzhiyun cmd->frame_count-1, instance->reg_set);
1757*4882a593Smuzhiyun
1758*4882a593Smuzhiyun return 0;
1759*4882a593Smuzhiyun out_return_cmd:
1760*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
1761*4882a593Smuzhiyun return SCSI_MLQUEUE_HOST_BUSY;
1762*4882a593Smuzhiyun }
1763*4882a593Smuzhiyun
1764*4882a593Smuzhiyun
1765*4882a593Smuzhiyun /**
1766*4882a593Smuzhiyun * megasas_queue_command - Queue entry point
1767*4882a593Smuzhiyun * @shost: adapter SCSI host
1768*4882a593Smuzhiyun * @scmd: SCSI command to be queued
1769*4882a593Smuzhiyun */
1770*4882a593Smuzhiyun static int
1771*4882a593Smuzhiyun megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1772*4882a593Smuzhiyun {
1773*4882a593Smuzhiyun struct megasas_instance *instance;
1774*4882a593Smuzhiyun struct MR_PRIV_DEVICE *mr_device_priv_data;
1775*4882a593Smuzhiyun u32 ld_tgt_id;
1776*4882a593Smuzhiyun
1777*4882a593Smuzhiyun instance = (struct megasas_instance *)
1778*4882a593Smuzhiyun scmd->device->host->hostdata;
1779*4882a593Smuzhiyun
1780*4882a593Smuzhiyun if (instance->unload == 1) {
1781*4882a593Smuzhiyun scmd->result = DID_NO_CONNECT << 16;
1782*4882a593Smuzhiyun scmd->scsi_done(scmd);
1783*4882a593Smuzhiyun return 0;
1784*4882a593Smuzhiyun }
1785*4882a593Smuzhiyun
1786*4882a593Smuzhiyun if (instance->issuepend_done == 0)
1787*4882a593Smuzhiyun return SCSI_MLQUEUE_HOST_BUSY;
1788*4882a593Smuzhiyun
1789*4882a593Smuzhiyun
1790*4882a593Smuzhiyun /* Check for an mpio path and adjust behavior */
1791*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1792*4882a593Smuzhiyun if (megasas_check_mpio_paths(instance, scmd) ==
1793*4882a593Smuzhiyun (DID_REQUEUE << 16)) {
1794*4882a593Smuzhiyun return SCSI_MLQUEUE_HOST_BUSY;
1795*4882a593Smuzhiyun } else {
1796*4882a593Smuzhiyun scmd->result = DID_NO_CONNECT << 16;
1797*4882a593Smuzhiyun scmd->scsi_done(scmd);
1798*4882a593Smuzhiyun return 0;
1799*4882a593Smuzhiyun }
1800*4882a593Smuzhiyun }
1801*4882a593Smuzhiyun
1802*4882a593Smuzhiyun mr_device_priv_data = scmd->device->hostdata;
1803*4882a593Smuzhiyun if (!mr_device_priv_data ||
1804*4882a593Smuzhiyun (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)) {
1805*4882a593Smuzhiyun scmd->result = DID_NO_CONNECT << 16;
1806*4882a593Smuzhiyun scmd->scsi_done(scmd);
1807*4882a593Smuzhiyun return 0;
1808*4882a593Smuzhiyun }
1809*4882a593Smuzhiyun
1810*4882a593Smuzhiyun if (MEGASAS_IS_LOGICAL(scmd->device)) {
1811*4882a593Smuzhiyun ld_tgt_id = MEGASAS_TARGET_ID(scmd->device);
1812*4882a593Smuzhiyun if (instance->ld_tgtid_status[ld_tgt_id] == LD_TARGET_ID_DELETED) {
1813*4882a593Smuzhiyun scmd->result = DID_NO_CONNECT << 16;
1814*4882a593Smuzhiyun scmd->scsi_done(scmd);
1815*4882a593Smuzhiyun return 0;
1816*4882a593Smuzhiyun }
1817*4882a593Smuzhiyun }
1818*4882a593Smuzhiyun
1819*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1820*4882a593Smuzhiyun return SCSI_MLQUEUE_HOST_BUSY;
1821*4882a593Smuzhiyun
1822*4882a593Smuzhiyun if (mr_device_priv_data->tm_busy)
1823*4882a593Smuzhiyun return SCSI_MLQUEUE_DEVICE_BUSY;
1824*4882a593Smuzhiyun
1825*4882a593Smuzhiyun
1826*4882a593Smuzhiyun scmd->result = 0;
1827*4882a593Smuzhiyun
1828*4882a593Smuzhiyun if (MEGASAS_IS_LOGICAL(scmd->device) &&
1829*4882a593Smuzhiyun (scmd->device->id >= instance->fw_supported_vd_count ||
1830*4882a593Smuzhiyun scmd->device->lun)) {
1831*4882a593Smuzhiyun scmd->result = DID_BAD_TARGET << 16;
1832*4882a593Smuzhiyun goto out_done;
1833*4882a593Smuzhiyun }
1834*4882a593Smuzhiyun
1835*4882a593Smuzhiyun if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1836*4882a593Smuzhiyun MEGASAS_IS_LOGICAL(scmd->device) &&
1837*4882a593Smuzhiyun (!instance->fw_sync_cache_support)) {
1838*4882a593Smuzhiyun scmd->result = DID_OK << 16;
1839*4882a593Smuzhiyun goto out_done;
1840*4882a593Smuzhiyun }
1841*4882a593Smuzhiyun
1842*4882a593Smuzhiyun return instance->instancet->build_and_issue_cmd(instance, scmd);
1843*4882a593Smuzhiyun
1844*4882a593Smuzhiyun out_done:
1845*4882a593Smuzhiyun scmd->scsi_done(scmd);
1846*4882a593Smuzhiyun return 0;
1847*4882a593Smuzhiyun }
1848*4882a593Smuzhiyun
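/*
 * megasas_lookup_instance - Map a SCSI host number to its adapter soft state
 * using the global megasas_mgmt_info table.
 */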
1849*4882a593Smuzhiyun static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1850*4882a593Smuzhiyun {
1851*4882a593Smuzhiyun int i;
1852*4882a593Smuzhiyun
1853*4882a593Smuzhiyun for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1854*4882a593Smuzhiyun
1855*4882a593Smuzhiyun if ((megasas_mgmt_info.instance[i]) &&
1856*4882a593Smuzhiyun (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1857*4882a593Smuzhiyun return megasas_mgmt_info.instance[i];
1858*4882a593Smuzhiyun }
1859*4882a593Smuzhiyun
1860*4882a593Smuzhiyun return NULL;
1861*4882a593Smuzhiyun }
1862*4882a593Smuzhiyun
1863*4882a593Smuzhiyun /*
1864*4882a593Smuzhiyun * megasas_set_dynamic_target_properties -
1865*4882a593Smuzhiyun * Device properties set by the driver may not be static and need to be
1866*4882a593Smuzhiyun * updated after OCR.
1867*4882a593Smuzhiyun *
1868*4882a593Smuzhiyun * set tm_capable.
1869*4882a593Smuzhiyun * set dma alignment (only for EEDP protection enabled VDs).
1870*4882a593Smuzhiyun *
1871*4882a593Smuzhiyun * @sdev: OS provided scsi device
1872*4882a593Smuzhiyun *
1873*4882a593Smuzhiyun * Returns void
1874*4882a593Smuzhiyun */
1875*4882a593Smuzhiyun void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
1876*4882a593Smuzhiyun bool is_target_prop)
1877*4882a593Smuzhiyun {
1878*4882a593Smuzhiyun u16 pd_index = 0, ld;
1879*4882a593Smuzhiyun u32 device_id;
1880*4882a593Smuzhiyun struct megasas_instance *instance;
1881*4882a593Smuzhiyun struct fusion_context *fusion;
1882*4882a593Smuzhiyun struct MR_PRIV_DEVICE *mr_device_priv_data;
1883*4882a593Smuzhiyun struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1884*4882a593Smuzhiyun struct MR_LD_RAID *raid;
1885*4882a593Smuzhiyun struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1886*4882a593Smuzhiyun
1887*4882a593Smuzhiyun instance = megasas_lookup_instance(sdev->host->host_no);
1888*4882a593Smuzhiyun fusion = instance->ctrl_context;
1889*4882a593Smuzhiyun mr_device_priv_data = sdev->hostdata;
1890*4882a593Smuzhiyun
1891*4882a593Smuzhiyun if (!fusion || !mr_device_priv_data)
1892*4882a593Smuzhiyun return;
1893*4882a593Smuzhiyun
1894*4882a593Smuzhiyun if (MEGASAS_IS_LOGICAL(sdev)) {
1895*4882a593Smuzhiyun device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1896*4882a593Smuzhiyun + sdev->id;
1897*4882a593Smuzhiyun local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1898*4882a593Smuzhiyun ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1899*4882a593Smuzhiyun if (ld >= instance->fw_supported_vd_count)
1900*4882a593Smuzhiyun return;
1901*4882a593Smuzhiyun raid = MR_LdRaidGet(ld, local_map_ptr);
1902*4882a593Smuzhiyun
1903*4882a593Smuzhiyun if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
1904*4882a593Smuzhiyun blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1905*4882a593Smuzhiyun
1906*4882a593Smuzhiyun mr_device_priv_data->is_tm_capable =
1907*4882a593Smuzhiyun raid->capability.tmCapable;
1908*4882a593Smuzhiyun
1909*4882a593Smuzhiyun if (!raid->flags.isEPD)
1910*4882a593Smuzhiyun sdev->no_write_same = 1;
1911*4882a593Smuzhiyun
1912*4882a593Smuzhiyun } else if (instance->use_seqnum_jbod_fp) {
1913*4882a593Smuzhiyun pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1914*4882a593Smuzhiyun sdev->id;
1915*4882a593Smuzhiyun pd_sync = (void *)fusion->pd_seq_sync
1916*4882a593Smuzhiyun [(instance->pd_seq_map_id - 1) & 1];
1917*4882a593Smuzhiyun mr_device_priv_data->is_tm_capable =
1918*4882a593Smuzhiyun pd_sync->seq[pd_index].capability.tmCapable;
1919*4882a593Smuzhiyun }
1920*4882a593Smuzhiyun
1921*4882a593Smuzhiyun if (is_target_prop && instance->tgt_prop->reset_tmo) {
1922*4882a593Smuzhiyun /*
1923*4882a593Smuzhiyun * If FW provides a target reset timeout value, driver will use
1924*4882a593Smuzhiyun * it. If not set, fallback to default values.
1925*4882a593Smuzhiyun */
1926*4882a593Smuzhiyun mr_device_priv_data->target_reset_tmo =
1927*4882a593Smuzhiyun min_t(u8, instance->max_reset_tmo,
1928*4882a593Smuzhiyun instance->tgt_prop->reset_tmo);
1929*4882a593Smuzhiyun mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
1930*4882a593Smuzhiyun } else {
1931*4882a593Smuzhiyun mr_device_priv_data->target_reset_tmo =
1932*4882a593Smuzhiyun MEGASAS_DEFAULT_TM_TIMEOUT;
1933*4882a593Smuzhiyun mr_device_priv_data->task_abort_tmo =
1934*4882a593Smuzhiyun MEGASAS_DEFAULT_TM_TIMEOUT;
1935*4882a593Smuzhiyun }
1936*4882a593Smuzhiyun }
1937*4882a593Smuzhiyun
1938*4882a593Smuzhiyun /*
1939*4882a593Smuzhiyun * megasas_set_nvme_device_properties -
1940*4882a593Smuzhiyun * set nomerges=2
1941*4882a593Smuzhiyun * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1942*4882a593Smuzhiyun * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1943*4882a593Smuzhiyun *
1944*4882a593Smuzhiyun * MR firmware provides the value in KB. The caller of this function
1945*4882a593Smuzhiyun * converts KB into bytes.
1946*4882a593Smuzhiyun *
1947*4882a593Smuzhiyun * e.g. MDTS=5 means 2^5 * NVMe page size. For a 4K page size, MR firmware
1948*4882a593Smuzhiyun * provides the value 128, since 32 * 4K = 128K.
1949*4882a593Smuzhiyun *
1950*4882a593Smuzhiyun * @sdev: scsi device
1951*4882a593Smuzhiyun * @max_io_size: maximum io transfer size
1952*4882a593Smuzhiyun *
1953*4882a593Smuzhiyun */
1954*4882a593Smuzhiyun static inline void
1955*4882a593Smuzhiyun megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1956*4882a593Smuzhiyun {
1957*4882a593Smuzhiyun struct megasas_instance *instance;
1958*4882a593Smuzhiyun u32 mr_nvme_pg_size;
1959*4882a593Smuzhiyun
1960*4882a593Smuzhiyun instance = (struct megasas_instance *)sdev->host->hostdata;
1961*4882a593Smuzhiyun mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1962*4882a593Smuzhiyun MR_DEFAULT_NVME_PAGE_SIZE);
1963*4882a593Smuzhiyun
1964*4882a593Smuzhiyun blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1965*4882a593Smuzhiyun
1966*4882a593Smuzhiyun blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1967*4882a593Smuzhiyun blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1968*4882a593Smuzhiyun }
1969*4882a593Smuzhiyun
1970*4882a593Smuzhiyun /*
1971*4882a593Smuzhiyun * megasas_set_fw_assisted_qd -
1972*4882a593Smuzhiyun * set device queue depth to the fw assisted qd, or
1973*4882a593Smuzhiyun * to can_queue when enable_sdev_max_qd is set
1974*4882a593Smuzhiyun *
1975*4882a593Smuzhiyun * @sdev: scsi device
1976*4882a593Smuzhiyun * @is_target_prop true, if fw provided target properties.
1977*4882a593Smuzhiyun */
1978*4882a593Smuzhiyun static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
1979*4882a593Smuzhiyun bool is_target_prop)
1980*4882a593Smuzhiyun {
1981*4882a593Smuzhiyun u8 interface_type;
1982*4882a593Smuzhiyun u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1983*4882a593Smuzhiyun u32 tgt_device_qd;
1984*4882a593Smuzhiyun struct megasas_instance *instance;
1985*4882a593Smuzhiyun struct MR_PRIV_DEVICE *mr_device_priv_data;
1986*4882a593Smuzhiyun
1987*4882a593Smuzhiyun instance = megasas_lookup_instance(sdev->host->host_no);
1988*4882a593Smuzhiyun mr_device_priv_data = sdev->hostdata;
1989*4882a593Smuzhiyun interface_type = mr_device_priv_data->interface_type;
1990*4882a593Smuzhiyun
1991*4882a593Smuzhiyun switch (interface_type) {
1992*4882a593Smuzhiyun case SAS_PD:
1993*4882a593Smuzhiyun device_qd = MEGASAS_SAS_QD;
1994*4882a593Smuzhiyun break;
1995*4882a593Smuzhiyun case SATA_PD:
1996*4882a593Smuzhiyun device_qd = MEGASAS_SATA_QD;
1997*4882a593Smuzhiyun break;
1998*4882a593Smuzhiyun case NVME_PD:
1999*4882a593Smuzhiyun device_qd = MEGASAS_NVME_QD;
2000*4882a593Smuzhiyun break;
2001*4882a593Smuzhiyun }
2002*4882a593Smuzhiyun
2003*4882a593Smuzhiyun if (is_target_prop) {
2004*4882a593Smuzhiyun tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
2005*4882a593Smuzhiyun if (tgt_device_qd)
2006*4882a593Smuzhiyun device_qd = min(instance->host->can_queue,
2007*4882a593Smuzhiyun (int)tgt_device_qd);
2008*4882a593Smuzhiyun }
2009*4882a593Smuzhiyun
2010*4882a593Smuzhiyun if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
2011*4882a593Smuzhiyun device_qd = instance->host->can_queue;
2012*4882a593Smuzhiyun
2013*4882a593Smuzhiyun scsi_change_queue_depth(sdev, device_qd);
2014*4882a593Smuzhiyun }
2015*4882a593Smuzhiyun
2016*4882a593Smuzhiyun /*
2017*4882a593Smuzhiyun * megasas_set_static_target_properties -
2018*4882a593Smuzhiyun * Device properties set by the driver are static and do not need to be
2019*4882a593Smuzhiyun * updated after OCR.
2020*4882a593Smuzhiyun *
2021*4882a593Smuzhiyun * set io timeout
2022*4882a593Smuzhiyun * set device queue depth
2023*4882a593Smuzhiyun * set nvme device properties. see - megasas_set_nvme_device_properties
2024*4882a593Smuzhiyun *
2025*4882a593Smuzhiyun * @sdev: scsi device
2026*4882a593Smuzhiyun * @is_target_prop true, if fw provided target properties.
2027*4882a593Smuzhiyun */
2028*4882a593Smuzhiyun static void megasas_set_static_target_properties(struct scsi_device *sdev,
2029*4882a593Smuzhiyun bool is_target_prop)
2030*4882a593Smuzhiyun {
2031*4882a593Smuzhiyun u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
2032*4882a593Smuzhiyun struct megasas_instance *instance;
2033*4882a593Smuzhiyun
2034*4882a593Smuzhiyun instance = megasas_lookup_instance(sdev->host->host_no);
2035*4882a593Smuzhiyun
2036*4882a593Smuzhiyun /*
2037*4882a593Smuzhiyun * The RAID firmware may require extended timeouts.
2038*4882a593Smuzhiyun */
2039*4882a593Smuzhiyun blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
2040*4882a593Smuzhiyun
2041*4882a593Smuzhiyun 	/* max_io_size_kb will be set to a non-zero value for
2042*4882a593Smuzhiyun 	 * NVMe based VDs and sysPDs.
2043*4882a593Smuzhiyun */
2044*4882a593Smuzhiyun if (is_target_prop)
2045*4882a593Smuzhiyun max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
2046*4882a593Smuzhiyun
2047*4882a593Smuzhiyun if (instance->nvme_page_size && max_io_size_kb)
2048*4882a593Smuzhiyun megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
2049*4882a593Smuzhiyun
2050*4882a593Smuzhiyun megasas_set_fw_assisted_qd(sdev, is_target_prop);
2051*4882a593Smuzhiyun }
2052*4882a593Smuzhiyun
2053*4882a593Smuzhiyun
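/**
 * megasas_slave_configure - SCSI mid-layer device configuration hook
 * @sdev: OS provided scsi device
 *
 * Queries PD info and target properties from the firmware (under reset_mutex)
 * and applies the static and dynamic device properties to @sdev.
 */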
2054*4882a593Smuzhiyun static int megasas_slave_configure(struct scsi_device *sdev)
2055*4882a593Smuzhiyun {
2056*4882a593Smuzhiyun u16 pd_index = 0;
2057*4882a593Smuzhiyun struct megasas_instance *instance;
2058*4882a593Smuzhiyun int ret_target_prop = DCMD_FAILED;
2059*4882a593Smuzhiyun bool is_target_prop = false;
2060*4882a593Smuzhiyun
2061*4882a593Smuzhiyun instance = megasas_lookup_instance(sdev->host->host_no);
2062*4882a593Smuzhiyun if (instance->pd_list_not_supported) {
2063*4882a593Smuzhiyun if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
2064*4882a593Smuzhiyun pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2065*4882a593Smuzhiyun sdev->id;
2066*4882a593Smuzhiyun if (instance->pd_list[pd_index].driveState !=
2067*4882a593Smuzhiyun MR_PD_STATE_SYSTEM)
2068*4882a593Smuzhiyun return -ENXIO;
2069*4882a593Smuzhiyun }
2070*4882a593Smuzhiyun }
2071*4882a593Smuzhiyun
2072*4882a593Smuzhiyun mutex_lock(&instance->reset_mutex);
2073*4882a593Smuzhiyun /* Send DCMD to Firmware and cache the information */
2074*4882a593Smuzhiyun if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
2075*4882a593Smuzhiyun megasas_get_pd_info(instance, sdev);
2076*4882a593Smuzhiyun
2077*4882a593Smuzhiyun /* Some ventura firmware may not have instance->nvme_page_size set.
2078*4882a593Smuzhiyun * Do not send MR_DCMD_DRV_GET_TARGET_PROP
2079*4882a593Smuzhiyun */
2080*4882a593Smuzhiyun if ((instance->tgt_prop) && (instance->nvme_page_size))
2081*4882a593Smuzhiyun ret_target_prop = megasas_get_target_prop(instance, sdev);
2082*4882a593Smuzhiyun
2083*4882a593Smuzhiyun is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
2084*4882a593Smuzhiyun megasas_set_static_target_properties(sdev, is_target_prop);
2085*4882a593Smuzhiyun
2086*4882a593Smuzhiyun /* This sdev property may change post OCR */
2087*4882a593Smuzhiyun megasas_set_dynamic_target_properties(sdev, is_target_prop);
2088*4882a593Smuzhiyun
2089*4882a593Smuzhiyun mutex_unlock(&instance->reset_mutex);
2090*4882a593Smuzhiyun
2091*4882a593Smuzhiyun return 0;
2092*4882a593Smuzhiyun }
2093*4882a593Smuzhiyun
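/**
 * megasas_slave_alloc - SCSI mid-layer device allocation hook
 * @sdev: OS provided scsi device
 *
 * Exposes only system PDs and valid logical drive LUNs to the OS and
 * allocates the per-device MR_PRIV_DEVICE data.
 */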
2094*4882a593Smuzhiyun static int megasas_slave_alloc(struct scsi_device *sdev)
2095*4882a593Smuzhiyun {
2096*4882a593Smuzhiyun u16 pd_index = 0, ld_tgt_id;
2097*4882a593Smuzhiyun struct megasas_instance *instance ;
2098*4882a593Smuzhiyun struct MR_PRIV_DEVICE *mr_device_priv_data;
2099*4882a593Smuzhiyun
2100*4882a593Smuzhiyun instance = megasas_lookup_instance(sdev->host->host_no);
2101*4882a593Smuzhiyun if (!MEGASAS_IS_LOGICAL(sdev)) {
2102*4882a593Smuzhiyun /*
2103*4882a593Smuzhiyun * Open the OS scan to the SYSTEM PD
2104*4882a593Smuzhiyun */
2105*4882a593Smuzhiyun pd_index =
2106*4882a593Smuzhiyun (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2107*4882a593Smuzhiyun sdev->id;
2108*4882a593Smuzhiyun if ((instance->pd_list_not_supported ||
2109*4882a593Smuzhiyun instance->pd_list[pd_index].driveState ==
2110*4882a593Smuzhiyun MR_PD_STATE_SYSTEM)) {
2111*4882a593Smuzhiyun goto scan_target;
2112*4882a593Smuzhiyun }
2113*4882a593Smuzhiyun return -ENXIO;
2114*4882a593Smuzhiyun } else if (!MEGASAS_IS_LUN_VALID(sdev)) {
2115*4882a593Smuzhiyun sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
2116*4882a593Smuzhiyun return -ENXIO;
2117*4882a593Smuzhiyun }
2118*4882a593Smuzhiyun
2119*4882a593Smuzhiyun scan_target:
2120*4882a593Smuzhiyun mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
2121*4882a593Smuzhiyun GFP_KERNEL);
2122*4882a593Smuzhiyun if (!mr_device_priv_data)
2123*4882a593Smuzhiyun return -ENOMEM;
2124*4882a593Smuzhiyun
2125*4882a593Smuzhiyun if (MEGASAS_IS_LOGICAL(sdev)) {
2126*4882a593Smuzhiyun ld_tgt_id = MEGASAS_TARGET_ID(sdev);
2127*4882a593Smuzhiyun instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_ACTIVE;
2128*4882a593Smuzhiyun if (megasas_dbg_lvl & LD_PD_DEBUG)
2129*4882a593Smuzhiyun sdev_printk(KERN_INFO, sdev, "LD target ID %d created.\n", ld_tgt_id);
2130*4882a593Smuzhiyun }
2131*4882a593Smuzhiyun
2132*4882a593Smuzhiyun sdev->hostdata = mr_device_priv_data;
2133*4882a593Smuzhiyun
2134*4882a593Smuzhiyun atomic_set(&mr_device_priv_data->r1_ldio_hint,
2135*4882a593Smuzhiyun instance->r1_ldio_hint_default);
2136*4882a593Smuzhiyun return 0;
2137*4882a593Smuzhiyun }
2138*4882a593Smuzhiyun
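/**
 * megasas_slave_destroy - SCSI mid-layer device teardown hook
 * @sdev: OS provided scsi device
 *
 * Marks a removed LD target as deleted and frees the per-device data.
 */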
2139*4882a593Smuzhiyun static void megasas_slave_destroy(struct scsi_device *sdev)
2140*4882a593Smuzhiyun {
2141*4882a593Smuzhiyun u16 ld_tgt_id;
2142*4882a593Smuzhiyun struct megasas_instance *instance;
2143*4882a593Smuzhiyun
2144*4882a593Smuzhiyun instance = megasas_lookup_instance(sdev->host->host_no);
2145*4882a593Smuzhiyun
2146*4882a593Smuzhiyun if (MEGASAS_IS_LOGICAL(sdev)) {
2147*4882a593Smuzhiyun if (!MEGASAS_IS_LUN_VALID(sdev)) {
2148*4882a593Smuzhiyun sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
2149*4882a593Smuzhiyun return;
2150*4882a593Smuzhiyun }
2151*4882a593Smuzhiyun ld_tgt_id = MEGASAS_TARGET_ID(sdev);
2152*4882a593Smuzhiyun instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
2153*4882a593Smuzhiyun if (megasas_dbg_lvl & LD_PD_DEBUG)
2154*4882a593Smuzhiyun sdev_printk(KERN_INFO, sdev,
2155*4882a593Smuzhiyun "LD target ID %d removed from OS stack\n", ld_tgt_id);
2156*4882a593Smuzhiyun }
2157*4882a593Smuzhiyun
2158*4882a593Smuzhiyun kfree(sdev->hostdata);
2159*4882a593Smuzhiyun sdev->hostdata = NULL;
2160*4882a593Smuzhiyun }
2161*4882a593Smuzhiyun
2162*4882a593Smuzhiyun /*
2163*4882a593Smuzhiyun * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
2164*4882a593Smuzhiyun * kill adapter
2165*4882a593Smuzhiyun * @instance: Adapter soft state
2166*4882a593Smuzhiyun *
2167*4882a593Smuzhiyun */
2168*4882a593Smuzhiyun static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
2169*4882a593Smuzhiyun {
2170*4882a593Smuzhiyun int i;
2171*4882a593Smuzhiyun struct megasas_cmd *cmd_mfi;
2172*4882a593Smuzhiyun struct megasas_cmd_fusion *cmd_fusion;
2173*4882a593Smuzhiyun struct fusion_context *fusion = instance->ctrl_context;
2174*4882a593Smuzhiyun
2175*4882a593Smuzhiyun /* Find all outstanding ioctls */
2176*4882a593Smuzhiyun if (fusion) {
2177*4882a593Smuzhiyun for (i = 0; i < instance->max_fw_cmds; i++) {
2178*4882a593Smuzhiyun cmd_fusion = fusion->cmd_list[i];
2179*4882a593Smuzhiyun if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
2180*4882a593Smuzhiyun cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2181*4882a593Smuzhiyun if (cmd_mfi->sync_cmd &&
2182*4882a593Smuzhiyun (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2183*4882a593Smuzhiyun cmd_mfi->frame->hdr.cmd_status =
2184*4882a593Smuzhiyun MFI_STAT_WRONG_STATE;
2185*4882a593Smuzhiyun megasas_complete_cmd(instance,
2186*4882a593Smuzhiyun cmd_mfi, DID_OK);
2187*4882a593Smuzhiyun }
2188*4882a593Smuzhiyun }
2189*4882a593Smuzhiyun }
2190*4882a593Smuzhiyun } else {
2191*4882a593Smuzhiyun for (i = 0; i < instance->max_fw_cmds; i++) {
2192*4882a593Smuzhiyun cmd_mfi = instance->cmd_list[i];
2193*4882a593Smuzhiyun if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2194*4882a593Smuzhiyun MFI_CMD_ABORT)
2195*4882a593Smuzhiyun megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2196*4882a593Smuzhiyun }
2197*4882a593Smuzhiyun }
2198*4882a593Smuzhiyun }
2199*4882a593Smuzhiyun
2200*4882a593Smuzhiyun
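/**
 * megaraid_sas_kill_hba - Take the adapter offline after a fatal error
 * @instance: Adapter soft state
 *
 * Marks the HBA as being in a critical error state, asks the firmware to stop
 * (MFI_STOP_ADP) where applicable, and completes any outstanding ioctls.
 */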
2201*4882a593Smuzhiyun void megaraid_sas_kill_hba(struct megasas_instance *instance)
2202*4882a593Smuzhiyun {
2203*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2204*4882a593Smuzhiyun dev_warn(&instance->pdev->dev,
2205*4882a593Smuzhiyun "Adapter already dead, skipping kill HBA\n");
2206*4882a593Smuzhiyun return;
2207*4882a593Smuzhiyun }
2208*4882a593Smuzhiyun
2209*4882a593Smuzhiyun /* Set critical error to block I/O & ioctls in case caller didn't */
2210*4882a593Smuzhiyun atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2211*4882a593Smuzhiyun /* Wait 1 second to ensure IO or ioctls in build have posted */
2212*4882a593Smuzhiyun msleep(1000);
2213*4882a593Smuzhiyun if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2214*4882a593Smuzhiyun (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2215*4882a593Smuzhiyun (instance->adapter_type != MFI_SERIES)) {
2216*4882a593Smuzhiyun if (!instance->requestorId) {
2217*4882a593Smuzhiyun writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2218*4882a593Smuzhiyun /* Flush */
2219*4882a593Smuzhiyun readl(&instance->reg_set->doorbell);
2220*4882a593Smuzhiyun }
2221*4882a593Smuzhiyun if (instance->requestorId && instance->peerIsPresent)
2222*4882a593Smuzhiyun memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2223*4882a593Smuzhiyun } else {
2224*4882a593Smuzhiyun writel(MFI_STOP_ADP,
2225*4882a593Smuzhiyun &instance->reg_set->inbound_doorbell);
2226*4882a593Smuzhiyun }
2227*4882a593Smuzhiyun /* Complete outstanding ioctls when adapter is killed */
2228*4882a593Smuzhiyun megasas_complete_outstanding_ioctls(instance);
2229*4882a593Smuzhiyun }
2230*4882a593Smuzhiyun
2231*4882a593Smuzhiyun /**
2232*4882a593Smuzhiyun * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
2233*4882a593Smuzhiyun * restored to max value
2234*4882a593Smuzhiyun * @instance: Adapter soft state
2235*4882a593Smuzhiyun *
2236*4882a593Smuzhiyun */
2237*4882a593Smuzhiyun void
2238*4882a593Smuzhiyun megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2239*4882a593Smuzhiyun {
2240*4882a593Smuzhiyun unsigned long flags;
2241*4882a593Smuzhiyun
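	/*
	 * Restore the full queue depth only once the FW busy flag has been
	 * set for at least 5 seconds and the number of outstanding FW
	 * commands has dropped below the throttle depth.
	 */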
2242*4882a593Smuzhiyun if (instance->flag & MEGASAS_FW_BUSY
2243*4882a593Smuzhiyun && time_after(jiffies, instance->last_time + 5 * HZ)
2244*4882a593Smuzhiyun && atomic_read(&instance->fw_outstanding) <
2245*4882a593Smuzhiyun instance->throttlequeuedepth + 1) {
2246*4882a593Smuzhiyun
2247*4882a593Smuzhiyun spin_lock_irqsave(instance->host->host_lock, flags);
2248*4882a593Smuzhiyun instance->flag &= ~MEGASAS_FW_BUSY;
2249*4882a593Smuzhiyun
2250*4882a593Smuzhiyun instance->host->can_queue = instance->cur_can_queue;
2251*4882a593Smuzhiyun spin_unlock_irqrestore(instance->host->host_lock, flags);
2252*4882a593Smuzhiyun }
2253*4882a593Smuzhiyun }
2254*4882a593Smuzhiyun
2255*4882a593Smuzhiyun /**
2256*4882a593Smuzhiyun * megasas_complete_cmd_dpc - Completes commands posted on the reply queue
2257*4882a593Smuzhiyun * @instance_addr: Address of adapter soft state
2258*4882a593Smuzhiyun *
2259*4882a593Smuzhiyun * Tasklet to complete cmds
2260*4882a593Smuzhiyun */
2261*4882a593Smuzhiyun static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2262*4882a593Smuzhiyun {
2263*4882a593Smuzhiyun u32 producer;
2264*4882a593Smuzhiyun u32 consumer;
2265*4882a593Smuzhiyun u32 context;
2266*4882a593Smuzhiyun struct megasas_cmd *cmd;
2267*4882a593Smuzhiyun struct megasas_instance *instance =
2268*4882a593Smuzhiyun (struct megasas_instance *)instance_addr;
2269*4882a593Smuzhiyun unsigned long flags;
2270*4882a593Smuzhiyun
2271*4882a593Smuzhiyun /* If we have already declared the adapter dead, do not complete cmds */
2272*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2273*4882a593Smuzhiyun return;
2274*4882a593Smuzhiyun
2275*4882a593Smuzhiyun spin_lock_irqsave(&instance->completion_lock, flags);
2276*4882a593Smuzhiyun
2277*4882a593Smuzhiyun producer = le32_to_cpu(*instance->producer);
2278*4882a593Smuzhiyun consumer = le32_to_cpu(*instance->consumer);
2279*4882a593Smuzhiyun
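	/*
	 * Walk the reply ring from consumer to producer: each entry holds the
	 * context (index into cmd_list) of a completed command.
	 */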
2280*4882a593Smuzhiyun while (consumer != producer) {
2281*4882a593Smuzhiyun context = le32_to_cpu(instance->reply_queue[consumer]);
2282*4882a593Smuzhiyun if (context >= instance->max_fw_cmds) {
2283*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2284*4882a593Smuzhiyun context);
2285*4882a593Smuzhiyun BUG();
2286*4882a593Smuzhiyun }
2287*4882a593Smuzhiyun
2288*4882a593Smuzhiyun cmd = instance->cmd_list[context];
2289*4882a593Smuzhiyun
2290*4882a593Smuzhiyun megasas_complete_cmd(instance, cmd, DID_OK);
2291*4882a593Smuzhiyun
2292*4882a593Smuzhiyun consumer++;
2293*4882a593Smuzhiyun if (consumer == (instance->max_fw_cmds + 1)) {
2294*4882a593Smuzhiyun consumer = 0;
2295*4882a593Smuzhiyun }
2296*4882a593Smuzhiyun }
2297*4882a593Smuzhiyun
2298*4882a593Smuzhiyun *instance->consumer = cpu_to_le32(producer);
2299*4882a593Smuzhiyun
2300*4882a593Smuzhiyun spin_unlock_irqrestore(&instance->completion_lock, flags);
2301*4882a593Smuzhiyun
2302*4882a593Smuzhiyun /*
2303*4882a593Smuzhiyun * Check if we can restore can_queue
2304*4882a593Smuzhiyun */
2305*4882a593Smuzhiyun megasas_check_and_restore_queue_depth(instance);
2306*4882a593Smuzhiyun }
2307*4882a593Smuzhiyun
2308*4882a593Smuzhiyun static void megasas_sriov_heartbeat_handler(struct timer_list *t);
2309*4882a593Smuzhiyun
2310*4882a593Smuzhiyun /**
2311*4882a593Smuzhiyun * megasas_start_timer - Initializes the SR-IOV heartbeat timer
2312*4882a593Smuzhiyun * @instance: Adapter soft state
2313*4882a593Smuzhiyun *
2314*4882a593Smuzhiyun */
2315*4882a593Smuzhiyun void megasas_start_timer(struct megasas_instance *instance)
2316*4882a593Smuzhiyun {
2317*4882a593Smuzhiyun struct timer_list *timer = &instance->sriov_heartbeat_timer;
2318*4882a593Smuzhiyun
2319*4882a593Smuzhiyun timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
2320*4882a593Smuzhiyun timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
2321*4882a593Smuzhiyun add_timer(timer);
2322*4882a593Smuzhiyun }
2323*4882a593Smuzhiyun
2324*4882a593Smuzhiyun static void
2325*4882a593Smuzhiyun megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2326*4882a593Smuzhiyun
2327*4882a593Smuzhiyun static void
2328*4882a593Smuzhiyun process_fw_state_change_wq(struct work_struct *work);
2329*4882a593Smuzhiyun
2330*4882a593Smuzhiyun static void megasas_do_ocr(struct megasas_instance *instance)
2331*4882a593Smuzhiyun {
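	/*
	 * For the older 1064R/PERC5/Verde ZCR controllers, flag the reset in
	 * progress through the consumer index before starting recovery.
	 */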
2332*4882a593Smuzhiyun if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2333*4882a593Smuzhiyun (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2334*4882a593Smuzhiyun (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2335*4882a593Smuzhiyun *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2336*4882a593Smuzhiyun }
2337*4882a593Smuzhiyun instance->instancet->disable_intr(instance);
2338*4882a593Smuzhiyun atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2339*4882a593Smuzhiyun instance->issuepend_done = 0;
2340*4882a593Smuzhiyun
2341*4882a593Smuzhiyun atomic_set(&instance->fw_outstanding, 0);
2342*4882a593Smuzhiyun megasas_internal_reset_defer_cmds(instance);
2343*4882a593Smuzhiyun process_fw_state_change_wq(&instance->work_init);
2344*4882a593Smuzhiyun }
2345*4882a593Smuzhiyun
2346*4882a593Smuzhiyun static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2347*4882a593Smuzhiyun int initial)
2348*4882a593Smuzhiyun {
2349*4882a593Smuzhiyun struct megasas_cmd *cmd;
2350*4882a593Smuzhiyun struct megasas_dcmd_frame *dcmd;
2351*4882a593Smuzhiyun struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2352*4882a593Smuzhiyun dma_addr_t new_affiliation_111_h;
2353*4882a593Smuzhiyun int ld, retval = 0;
2354*4882a593Smuzhiyun u8 thisVf;
2355*4882a593Smuzhiyun
2356*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
2357*4882a593Smuzhiyun
2358*4882a593Smuzhiyun if (!cmd) {
2359*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:"
2360*4882a593Smuzhiyun "Failed to get cmd for scsi%d\n",
2361*4882a593Smuzhiyun instance->host->host_no);
2362*4882a593Smuzhiyun return -ENOMEM;
2363*4882a593Smuzhiyun }
2364*4882a593Smuzhiyun
2365*4882a593Smuzhiyun dcmd = &cmd->frame->dcmd;
2366*4882a593Smuzhiyun
2367*4882a593Smuzhiyun if (!instance->vf_affiliation_111) {
2368*4882a593Smuzhiyun dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2369*4882a593Smuzhiyun "affiliation for scsi%d\n", instance->host->host_no);
2370*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
2371*4882a593Smuzhiyun return -ENOMEM;
2372*4882a593Smuzhiyun }
2373*4882a593Smuzhiyun
2374*4882a593Smuzhiyun if (initial)
2375*4882a593Smuzhiyun memset(instance->vf_affiliation_111, 0,
2376*4882a593Smuzhiyun sizeof(struct MR_LD_VF_AFFILIATION_111));
2377*4882a593Smuzhiyun else {
2378*4882a593Smuzhiyun new_affiliation_111 =
2379*4882a593Smuzhiyun dma_alloc_coherent(&instance->pdev->dev,
2380*4882a593Smuzhiyun sizeof(struct MR_LD_VF_AFFILIATION_111),
2381*4882a593Smuzhiyun &new_affiliation_111_h, GFP_KERNEL);
2382*4882a593Smuzhiyun if (!new_affiliation_111) {
2383*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2384*4882a593Smuzhiyun "memory for new affiliation for scsi%d\n",
2385*4882a593Smuzhiyun instance->host->host_no);
2386*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
2387*4882a593Smuzhiyun return -ENOMEM;
2388*4882a593Smuzhiyun }
2389*4882a593Smuzhiyun }
2390*4882a593Smuzhiyun
2391*4882a593Smuzhiyun memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2392*4882a593Smuzhiyun
2393*4882a593Smuzhiyun dcmd->cmd = MFI_CMD_DCMD;
2394*4882a593Smuzhiyun dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2395*4882a593Smuzhiyun dcmd->sge_count = 1;
2396*4882a593Smuzhiyun dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2397*4882a593Smuzhiyun dcmd->timeout = 0;
2398*4882a593Smuzhiyun dcmd->pad_0 = 0;
2399*4882a593Smuzhiyun dcmd->data_xfer_len =
2400*4882a593Smuzhiyun cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2401*4882a593Smuzhiyun dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2402*4882a593Smuzhiyun
2403*4882a593Smuzhiyun if (initial)
2404*4882a593Smuzhiyun dcmd->sgl.sge32[0].phys_addr =
2405*4882a593Smuzhiyun cpu_to_le32(instance->vf_affiliation_111_h);
2406*4882a593Smuzhiyun else
2407*4882a593Smuzhiyun dcmd->sgl.sge32[0].phys_addr =
2408*4882a593Smuzhiyun cpu_to_le32(new_affiliation_111_h);
2409*4882a593Smuzhiyun
2410*4882a593Smuzhiyun dcmd->sgl.sge32[0].length = cpu_to_le32(
2411*4882a593Smuzhiyun sizeof(struct MR_LD_VF_AFFILIATION_111));
2412*4882a593Smuzhiyun
2413*4882a593Smuzhiyun dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2414*4882a593Smuzhiyun "scsi%d\n", instance->host->host_no);
2415*4882a593Smuzhiyun
2416*4882a593Smuzhiyun if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2417*4882a593Smuzhiyun dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2418*4882a593Smuzhiyun " failed with status 0x%x for scsi%d\n",
2419*4882a593Smuzhiyun dcmd->cmd_status, instance->host->host_no);
2420*4882a593Smuzhiyun retval = 1; /* Do a scan if we couldn't get affiliation */
2421*4882a593Smuzhiyun goto out;
2422*4882a593Smuzhiyun }
2423*4882a593Smuzhiyun
2424*4882a593Smuzhiyun if (!initial) {
2425*4882a593Smuzhiyun thisVf = new_affiliation_111->thisVf;
2426*4882a593Smuzhiyun for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2427*4882a593Smuzhiyun if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2428*4882a593Smuzhiyun new_affiliation_111->map[ld].policy[thisVf]) {
2429*4882a593Smuzhiyun dev_warn(&instance->pdev->dev, "SR-IOV: "
2430*4882a593Smuzhiyun "Got new LD/VF affiliation for scsi%d\n",
2431*4882a593Smuzhiyun instance->host->host_no);
2432*4882a593Smuzhiyun memcpy(instance->vf_affiliation_111,
2433*4882a593Smuzhiyun new_affiliation_111,
2434*4882a593Smuzhiyun sizeof(struct MR_LD_VF_AFFILIATION_111));
2435*4882a593Smuzhiyun retval = 1;
2436*4882a593Smuzhiyun goto out;
2437*4882a593Smuzhiyun }
2438*4882a593Smuzhiyun }
2439*4882a593Smuzhiyun out:
2440*4882a593Smuzhiyun if (new_affiliation_111) {
2441*4882a593Smuzhiyun dma_free_coherent(&instance->pdev->dev,
2442*4882a593Smuzhiyun sizeof(struct MR_LD_VF_AFFILIATION_111),
2443*4882a593Smuzhiyun new_affiliation_111,
2444*4882a593Smuzhiyun new_affiliation_111_h);
2445*4882a593Smuzhiyun }
2446*4882a593Smuzhiyun
2447*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
2448*4882a593Smuzhiyun
2449*4882a593Smuzhiyun return retval;
2450*4882a593Smuzhiyun }
2451*4882a593Smuzhiyun
2452*4882a593Smuzhiyun static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2453*4882a593Smuzhiyun int initial)
2454*4882a593Smuzhiyun {
2455*4882a593Smuzhiyun struct megasas_cmd *cmd;
2456*4882a593Smuzhiyun struct megasas_dcmd_frame *dcmd;
2457*4882a593Smuzhiyun struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2458*4882a593Smuzhiyun struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2459*4882a593Smuzhiyun dma_addr_t new_affiliation_h;
2460*4882a593Smuzhiyun int i, j, retval = 0, found = 0, doscan = 0;
2461*4882a593Smuzhiyun u8 thisVf;
2462*4882a593Smuzhiyun
2463*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
2464*4882a593Smuzhiyun
2465*4882a593Smuzhiyun if (!cmd) {
2466*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
2467*4882a593Smuzhiyun "Failed to get cmd for scsi%d\n",
2468*4882a593Smuzhiyun instance->host->host_no);
2469*4882a593Smuzhiyun return -ENOMEM;
2470*4882a593Smuzhiyun }
2471*4882a593Smuzhiyun
2472*4882a593Smuzhiyun dcmd = &cmd->frame->dcmd;
2473*4882a593Smuzhiyun
2474*4882a593Smuzhiyun if (!instance->vf_affiliation) {
2475*4882a593Smuzhiyun dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2476*4882a593Smuzhiyun "affiliation for scsi%d\n", instance->host->host_no);
2477*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
2478*4882a593Smuzhiyun return -ENOMEM;
2479*4882a593Smuzhiyun }
2480*4882a593Smuzhiyun
2481*4882a593Smuzhiyun if (initial)
2482*4882a593Smuzhiyun memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2483*4882a593Smuzhiyun sizeof(struct MR_LD_VF_AFFILIATION));
2484*4882a593Smuzhiyun else {
2485*4882a593Smuzhiyun new_affiliation =
2486*4882a593Smuzhiyun dma_alloc_coherent(&instance->pdev->dev,
2487*4882a593Smuzhiyun (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
2488*4882a593Smuzhiyun &new_affiliation_h, GFP_KERNEL);
2489*4882a593Smuzhiyun if (!new_affiliation) {
2490*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2491*4882a593Smuzhiyun "memory for new affiliation for scsi%d\n",
2492*4882a593Smuzhiyun instance->host->host_no);
2493*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
2494*4882a593Smuzhiyun return -ENOMEM;
2495*4882a593Smuzhiyun }
2496*4882a593Smuzhiyun }
2497*4882a593Smuzhiyun
2498*4882a593Smuzhiyun memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2499*4882a593Smuzhiyun
2500*4882a593Smuzhiyun dcmd->cmd = MFI_CMD_DCMD;
2501*4882a593Smuzhiyun dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2502*4882a593Smuzhiyun dcmd->sge_count = 1;
2503*4882a593Smuzhiyun dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2504*4882a593Smuzhiyun dcmd->timeout = 0;
2505*4882a593Smuzhiyun dcmd->pad_0 = 0;
2506*4882a593Smuzhiyun dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2507*4882a593Smuzhiyun sizeof(struct MR_LD_VF_AFFILIATION));
2508*4882a593Smuzhiyun dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2509*4882a593Smuzhiyun
2510*4882a593Smuzhiyun if (initial)
2511*4882a593Smuzhiyun dcmd->sgl.sge32[0].phys_addr =
2512*4882a593Smuzhiyun cpu_to_le32(instance->vf_affiliation_h);
2513*4882a593Smuzhiyun else
2514*4882a593Smuzhiyun dcmd->sgl.sge32[0].phys_addr =
2515*4882a593Smuzhiyun cpu_to_le32(new_affiliation_h);
2516*4882a593Smuzhiyun
2517*4882a593Smuzhiyun dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2518*4882a593Smuzhiyun sizeof(struct MR_LD_VF_AFFILIATION));
2519*4882a593Smuzhiyun
2520*4882a593Smuzhiyun dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2521*4882a593Smuzhiyun "scsi%d\n", instance->host->host_no);
2522*4882a593Smuzhiyun
2523*4882a593Smuzhiyun
2524*4882a593Smuzhiyun if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2525*4882a593Smuzhiyun dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2526*4882a593Smuzhiyun " failed with status 0x%x for scsi%d\n",
2527*4882a593Smuzhiyun dcmd->cmd_status, instance->host->host_no);
2528*4882a593Smuzhiyun retval = 1; /* Do a scan if we couldn't get affiliation */
2529*4882a593Smuzhiyun goto out;
2530*4882a593Smuzhiyun }
2531*4882a593Smuzhiyun
2532*4882a593Smuzhiyun if (!initial) {
2533*4882a593Smuzhiyun if (!new_affiliation->ldCount) {
2534*4882a593Smuzhiyun dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2535*4882a593Smuzhiyun "affiliation for passive path for scsi%d\n",
2536*4882a593Smuzhiyun instance->host->host_no);
2537*4882a593Smuzhiyun retval = 1;
2538*4882a593Smuzhiyun goto out;
2539*4882a593Smuzhiyun }
2540*4882a593Smuzhiyun newmap = new_affiliation->map;
2541*4882a593Smuzhiyun savedmap = instance->vf_affiliation->map;
2542*4882a593Smuzhiyun thisVf = new_affiliation->thisVf;
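		/*
		 * First pass: for every LD in the new map, look for the same
		 * target in the saved map. A changed access policy for this VF,
		 * or a newly visible LD, means the affiliation changed and a
		 * rescan is needed.
		 */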
2543*4882a593Smuzhiyun for (i = 0 ; i < new_affiliation->ldCount; i++) {
2544*4882a593Smuzhiyun found = 0;
2545*4882a593Smuzhiyun for (j = 0; j < instance->vf_affiliation->ldCount;
2546*4882a593Smuzhiyun j++) {
2547*4882a593Smuzhiyun if (newmap->ref.targetId ==
2548*4882a593Smuzhiyun savedmap->ref.targetId) {
2549*4882a593Smuzhiyun found = 1;
2550*4882a593Smuzhiyun if (newmap->policy[thisVf] !=
2551*4882a593Smuzhiyun savedmap->policy[thisVf]) {
2552*4882a593Smuzhiyun doscan = 1;
2553*4882a593Smuzhiyun goto out;
2554*4882a593Smuzhiyun }
2555*4882a593Smuzhiyun }
2556*4882a593Smuzhiyun savedmap = (struct MR_LD_VF_MAP *)
2557*4882a593Smuzhiyun ((unsigned char *)savedmap +
2558*4882a593Smuzhiyun savedmap->size);
2559*4882a593Smuzhiyun }
2560*4882a593Smuzhiyun if (!found && newmap->policy[thisVf] !=
2561*4882a593Smuzhiyun MR_LD_ACCESS_HIDDEN) {
2562*4882a593Smuzhiyun doscan = 1;
2563*4882a593Smuzhiyun goto out;
2564*4882a593Smuzhiyun }
2565*4882a593Smuzhiyun newmap = (struct MR_LD_VF_MAP *)
2566*4882a593Smuzhiyun ((unsigned char *)newmap + newmap->size);
2567*4882a593Smuzhiyun }
2568*4882a593Smuzhiyun
2569*4882a593Smuzhiyun newmap = new_affiliation->map;
2570*4882a593Smuzhiyun savedmap = instance->vf_affiliation->map;
2571*4882a593Smuzhiyun
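		/*
		 * Second pass: check LDs present in the saved map. If one has
		 * disappeared from the new map (and was not hidden), or its
		 * policy changed, a rescan is needed as well.
		 */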
2572*4882a593Smuzhiyun for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2573*4882a593Smuzhiyun found = 0;
2574*4882a593Smuzhiyun for (j = 0 ; j < new_affiliation->ldCount; j++) {
2575*4882a593Smuzhiyun if (savedmap->ref.targetId ==
2576*4882a593Smuzhiyun newmap->ref.targetId) {
2577*4882a593Smuzhiyun found = 1;
2578*4882a593Smuzhiyun if (savedmap->policy[thisVf] !=
2579*4882a593Smuzhiyun newmap->policy[thisVf]) {
2580*4882a593Smuzhiyun doscan = 1;
2581*4882a593Smuzhiyun goto out;
2582*4882a593Smuzhiyun }
2583*4882a593Smuzhiyun }
2584*4882a593Smuzhiyun newmap = (struct MR_LD_VF_MAP *)
2585*4882a593Smuzhiyun ((unsigned char *)newmap +
2586*4882a593Smuzhiyun newmap->size);
2587*4882a593Smuzhiyun }
2588*4882a593Smuzhiyun if (!found && savedmap->policy[thisVf] !=
2589*4882a593Smuzhiyun MR_LD_ACCESS_HIDDEN) {
2590*4882a593Smuzhiyun doscan = 1;
2591*4882a593Smuzhiyun goto out;
2592*4882a593Smuzhiyun }
2593*4882a593Smuzhiyun savedmap = (struct MR_LD_VF_MAP *)
2594*4882a593Smuzhiyun ((unsigned char *)savedmap +
2595*4882a593Smuzhiyun savedmap->size);
2596*4882a593Smuzhiyun }
2597*4882a593Smuzhiyun }
2598*4882a593Smuzhiyun out:
2599*4882a593Smuzhiyun if (doscan) {
2600*4882a593Smuzhiyun dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2601*4882a593Smuzhiyun "affiliation for scsi%d\n", instance->host->host_no);
2602*4882a593Smuzhiyun memcpy(instance->vf_affiliation, new_affiliation,
2603*4882a593Smuzhiyun new_affiliation->size);
2604*4882a593Smuzhiyun retval = 1;
2605*4882a593Smuzhiyun }
2606*4882a593Smuzhiyun
2607*4882a593Smuzhiyun if (new_affiliation)
2608*4882a593Smuzhiyun dma_free_coherent(&instance->pdev->dev,
2609*4882a593Smuzhiyun (MAX_LOGICAL_DRIVES + 1) *
2610*4882a593Smuzhiyun sizeof(struct MR_LD_VF_AFFILIATION),
2611*4882a593Smuzhiyun new_affiliation, new_affiliation_h);
2612*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
2613*4882a593Smuzhiyun
2614*4882a593Smuzhiyun return retval;
2615*4882a593Smuzhiyun }
2616*4882a593Smuzhiyun
2617*4882a593Smuzhiyun /* This function will get the current SR-IOV LD/VF affiliation */
2618*4882a593Smuzhiyun static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2619*4882a593Smuzhiyun int initial)
2620*4882a593Smuzhiyun {
2621*4882a593Smuzhiyun int retval;
2622*4882a593Smuzhiyun
2623*4882a593Smuzhiyun if (instance->PlasmaFW111)
2624*4882a593Smuzhiyun retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2625*4882a593Smuzhiyun else
2626*4882a593Smuzhiyun retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2627*4882a593Smuzhiyun return retval;
2628*4882a593Smuzhiyun }
2629*4882a593Smuzhiyun
2630*4882a593Smuzhiyun /* This function will tell FW to start the SR-IOV heartbeat */
2631*4882a593Smuzhiyun int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2632*4882a593Smuzhiyun int initial)
2633*4882a593Smuzhiyun {
2634*4882a593Smuzhiyun struct megasas_cmd *cmd;
2635*4882a593Smuzhiyun struct megasas_dcmd_frame *dcmd;
2636*4882a593Smuzhiyun int retval = 0;
2637*4882a593Smuzhiyun
2638*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
2639*4882a593Smuzhiyun
2640*4882a593Smuzhiyun if (!cmd) {
2641*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2642*4882a593Smuzhiyun "Failed to get cmd for scsi%d\n",
2643*4882a593Smuzhiyun instance->host->host_no);
2644*4882a593Smuzhiyun return -ENOMEM;
2645*4882a593Smuzhiyun }
2646*4882a593Smuzhiyun
2647*4882a593Smuzhiyun dcmd = &cmd->frame->dcmd;
2648*4882a593Smuzhiyun
2649*4882a593Smuzhiyun if (initial) {
2650*4882a593Smuzhiyun instance->hb_host_mem =
2651*4882a593Smuzhiyun dma_alloc_coherent(&instance->pdev->dev,
2652*4882a593Smuzhiyun sizeof(struct MR_CTRL_HB_HOST_MEM),
2653*4882a593Smuzhiyun &instance->hb_host_mem_h,
2654*4882a593Smuzhiyun GFP_KERNEL);
2655*4882a593Smuzhiyun if (!instance->hb_host_mem) {
2656*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2657*4882a593Smuzhiyun " memory for heartbeat host memory for scsi%d\n",
2658*4882a593Smuzhiyun instance->host->host_no);
2659*4882a593Smuzhiyun retval = -ENOMEM;
2660*4882a593Smuzhiyun goto out;
2661*4882a593Smuzhiyun }
2662*4882a593Smuzhiyun }
2663*4882a593Smuzhiyun
2664*4882a593Smuzhiyun memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2665*4882a593Smuzhiyun
2666*4882a593Smuzhiyun dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2667*4882a593Smuzhiyun dcmd->cmd = MFI_CMD_DCMD;
2668*4882a593Smuzhiyun dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2669*4882a593Smuzhiyun dcmd->sge_count = 1;
2670*4882a593Smuzhiyun dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2671*4882a593Smuzhiyun dcmd->timeout = 0;
2672*4882a593Smuzhiyun dcmd->pad_0 = 0;
2673*4882a593Smuzhiyun dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2674*4882a593Smuzhiyun dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2675*4882a593Smuzhiyun
2676*4882a593Smuzhiyun megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2677*4882a593Smuzhiyun sizeof(struct MR_CTRL_HB_HOST_MEM));
2678*4882a593Smuzhiyun
2679*4882a593Smuzhiyun dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2680*4882a593Smuzhiyun instance->host->host_no);
2681*4882a593Smuzhiyun
2682*4882a593Smuzhiyun if ((instance->adapter_type != MFI_SERIES) &&
2683*4882a593Smuzhiyun !instance->mask_interrupts)
2684*4882a593Smuzhiyun retval = megasas_issue_blocked_cmd(instance, cmd,
2685*4882a593Smuzhiyun MEGASAS_ROUTINE_WAIT_TIME_VF);
2686*4882a593Smuzhiyun else
2687*4882a593Smuzhiyun retval = megasas_issue_polled(instance, cmd);
2688*4882a593Smuzhiyun
2689*4882a593Smuzhiyun if (retval) {
2690*4882a593Smuzhiyun dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2691*4882a593Smuzhiyun "_MEM_ALLOC DCMD %s for scsi%d\n",
2692*4882a593Smuzhiyun (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2693*4882a593Smuzhiyun "timed out" : "failed", instance->host->host_no);
2694*4882a593Smuzhiyun retval = 1;
2695*4882a593Smuzhiyun }
2696*4882a593Smuzhiyun
2697*4882a593Smuzhiyun out:
2698*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
2699*4882a593Smuzhiyun
2700*4882a593Smuzhiyun return retval;
2701*4882a593Smuzhiyun }
2702*4882a593Smuzhiyun
2703*4882a593Smuzhiyun /* Handler for SR-IOV heartbeat */
2704*4882a593Smuzhiyun static void megasas_sriov_heartbeat_handler(struct timer_list *t)
2705*4882a593Smuzhiyun {
2706*4882a593Smuzhiyun struct megasas_instance *instance =
2707*4882a593Smuzhiyun from_timer(instance, t, sriov_heartbeat_timer);
2708*4882a593Smuzhiyun
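	/*
	 * If the FW advanced its counter since the last check, echo it back
	 * in driverCounter and re-arm the timer; otherwise the FW missed a
	 * heartbeat and recovery work is scheduled.
	 */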
2709*4882a593Smuzhiyun if (instance->hb_host_mem->HB.fwCounter !=
2710*4882a593Smuzhiyun instance->hb_host_mem->HB.driverCounter) {
2711*4882a593Smuzhiyun instance->hb_host_mem->HB.driverCounter =
2712*4882a593Smuzhiyun instance->hb_host_mem->HB.fwCounter;
2713*4882a593Smuzhiyun mod_timer(&instance->sriov_heartbeat_timer,
2714*4882a593Smuzhiyun jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2715*4882a593Smuzhiyun } else {
2716*4882a593Smuzhiyun dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2717*4882a593Smuzhiyun "completed for scsi%d\n", instance->host->host_no);
2718*4882a593Smuzhiyun schedule_work(&instance->work_init);
2719*4882a593Smuzhiyun }
2720*4882a593Smuzhiyun }
2721*4882a593Smuzhiyun
2722*4882a593Smuzhiyun /**
2723*4882a593Smuzhiyun * megasas_wait_for_outstanding - Wait for all outstanding cmds
2724*4882a593Smuzhiyun * @instance: Adapter soft state
2725*4882a593Smuzhiyun *
2726*4882a593Smuzhiyun * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
2727*4882a593Smuzhiyun * complete all its outstanding commands. Returns error if one or more IOs
2728*4882a593Smuzhiyun * are pending after this time period. It also marks the controller dead.
2729*4882a593Smuzhiyun */
2730*4882a593Smuzhiyun static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2731*4882a593Smuzhiyun {
2732*4882a593Smuzhiyun int i, sl, outstanding;
2733*4882a593Smuzhiyun u32 reset_index;
2734*4882a593Smuzhiyun u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2735*4882a593Smuzhiyun unsigned long flags;
2736*4882a593Smuzhiyun struct list_head clist_local;
2737*4882a593Smuzhiyun struct megasas_cmd *reset_cmd;
2738*4882a593Smuzhiyun u32 fw_state;
2739*4882a593Smuzhiyun
2740*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2741*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2742*4882a593Smuzhiyun __func__, __LINE__);
2743*4882a593Smuzhiyun return FAILED;
2744*4882a593Smuzhiyun }
2745*4882a593Smuzhiyun
2746*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2747*4882a593Smuzhiyun
2748*4882a593Smuzhiyun INIT_LIST_HEAD(&clist_local);
2749*4882a593Smuzhiyun spin_lock_irqsave(&instance->hba_lock, flags);
2750*4882a593Smuzhiyun list_splice_init(&instance->internal_reset_pending_q,
2751*4882a593Smuzhiyun &clist_local);
2752*4882a593Smuzhiyun spin_unlock_irqrestore(&instance->hba_lock, flags);
2753*4882a593Smuzhiyun
2754*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2755*4882a593Smuzhiyun for (i = 0; i < wait_time; i++) {
2756*4882a593Smuzhiyun msleep(1000);
2757*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2758*4882a593Smuzhiyun break;
2759*4882a593Smuzhiyun }
2760*4882a593Smuzhiyun
2761*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2762*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2763*4882a593Smuzhiyun atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2764*4882a593Smuzhiyun return FAILED;
2765*4882a593Smuzhiyun }
2766*4882a593Smuzhiyun
2767*4882a593Smuzhiyun reset_index = 0;
2768*4882a593Smuzhiyun while (!list_empty(&clist_local)) {
2769*4882a593Smuzhiyun reset_cmd = list_entry((&clist_local)->next,
2770*4882a593Smuzhiyun struct megasas_cmd, list);
2771*4882a593Smuzhiyun list_del_init(&reset_cmd->list);
2772*4882a593Smuzhiyun if (reset_cmd->scmd) {
2773*4882a593Smuzhiyun reset_cmd->scmd->result = DID_REQUEUE << 16;
2774*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2775*4882a593Smuzhiyun reset_index, reset_cmd,
2776*4882a593Smuzhiyun reset_cmd->scmd->cmnd[0]);
2777*4882a593Smuzhiyun
2778*4882a593Smuzhiyun reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2779*4882a593Smuzhiyun megasas_return_cmd(instance, reset_cmd);
2780*4882a593Smuzhiyun } else if (reset_cmd->sync_cmd) {
2781*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "%p synch cmds"
2782*4882a593Smuzhiyun "reset queue\n",
2783*4882a593Smuzhiyun reset_cmd);
2784*4882a593Smuzhiyun
2785*4882a593Smuzhiyun reset_cmd->cmd_status_drv = DCMD_INIT;
2786*4882a593Smuzhiyun instance->instancet->fire_cmd(instance,
2787*4882a593Smuzhiyun reset_cmd->frame_phys_addr,
2788*4882a593Smuzhiyun 0, instance->reg_set);
2789*4882a593Smuzhiyun } else {
2790*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "%p unexpected"
2791*4882a593Smuzhiyun "cmds lst\n",
2792*4882a593Smuzhiyun reset_cmd);
2793*4882a593Smuzhiyun }
2794*4882a593Smuzhiyun reset_index++;
2795*4882a593Smuzhiyun }
2796*4882a593Smuzhiyun
2797*4882a593Smuzhiyun return SUCCESS;
2798*4882a593Smuzhiyun }
2799*4882a593Smuzhiyun
2800*4882a593Smuzhiyun for (i = 0; i < resetwaittime; i++) {
2801*4882a593Smuzhiyun outstanding = atomic_read(&instance->fw_outstanding);
2802*4882a593Smuzhiyun
2803*4882a593Smuzhiyun if (!outstanding)
2804*4882a593Smuzhiyun break;
2805*4882a593Smuzhiyun
2806*4882a593Smuzhiyun if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2807*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
2808*4882a593Smuzhiyun "commands to complete\n",i,outstanding);
2809*4882a593Smuzhiyun /*
2810*4882a593Smuzhiyun * Call the cmd completion routine. Cmds are
2811*4882a593Smuzhiyun * completed directly here, without depending on the ISR.
2812*4882a593Smuzhiyun */
2813*4882a593Smuzhiyun megasas_complete_cmd_dpc((unsigned long)instance);
2814*4882a593Smuzhiyun }
2815*4882a593Smuzhiyun
2816*4882a593Smuzhiyun msleep(1000);
2817*4882a593Smuzhiyun }
2818*4882a593Smuzhiyun
2819*4882a593Smuzhiyun i = 0;
2820*4882a593Smuzhiyun outstanding = atomic_read(&instance->fw_outstanding);
2821*4882a593Smuzhiyun fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2822*4882a593Smuzhiyun
2823*4882a593Smuzhiyun if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2824*4882a593Smuzhiyun goto no_outstanding;
2825*4882a593Smuzhiyun
2826*4882a593Smuzhiyun if (instance->disableOnlineCtrlReset)
2827*4882a593Smuzhiyun goto kill_hba_and_failed;
2828*4882a593Smuzhiyun do {
2829*4882a593Smuzhiyun if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2830*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
2831*4882a593Smuzhiyun "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
2832*4882a593Smuzhiyun __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2833*4882a593Smuzhiyun if (i == 3)
2834*4882a593Smuzhiyun goto kill_hba_and_failed;
2835*4882a593Smuzhiyun megasas_do_ocr(instance);
2836*4882a593Smuzhiyun
2837*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2838*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2839*4882a593Smuzhiyun __func__, __LINE__);
2840*4882a593Smuzhiyun return FAILED;
2841*4882a593Smuzhiyun }
2842*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2843*4882a593Smuzhiyun __func__, __LINE__);
2844*4882a593Smuzhiyun
2845*4882a593Smuzhiyun for (sl = 0; sl < 10; sl++)
2846*4882a593Smuzhiyun msleep(500);
2847*4882a593Smuzhiyun
2848*4882a593Smuzhiyun outstanding = atomic_read(&instance->fw_outstanding);
2849*4882a593Smuzhiyun
2850*4882a593Smuzhiyun fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2851*4882a593Smuzhiyun if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2852*4882a593Smuzhiyun goto no_outstanding;
2853*4882a593Smuzhiyun }
2854*4882a593Smuzhiyun i++;
2855*4882a593Smuzhiyun } while (i <= 3);
2856*4882a593Smuzhiyun
2857*4882a593Smuzhiyun no_outstanding:
2858*4882a593Smuzhiyun
2859*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2860*4882a593Smuzhiyun __func__, __LINE__);
2861*4882a593Smuzhiyun return SUCCESS;
2862*4882a593Smuzhiyun
2863*4882a593Smuzhiyun kill_hba_and_failed:
2864*4882a593Smuzhiyun
2865*4882a593Smuzhiyun /* Reset not supported, kill adapter */
2866*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2867*4882a593Smuzhiyun " disableOnlineCtrlReset %d fw_outstanding %d \n",
2868*4882a593Smuzhiyun __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2869*4882a593Smuzhiyun atomic_read(&instance->fw_outstanding));
2870*4882a593Smuzhiyun megasas_dump_pending_frames(instance);
2871*4882a593Smuzhiyun megaraid_sas_kill_hba(instance);
2872*4882a593Smuzhiyun
2873*4882a593Smuzhiyun return FAILED;
2874*4882a593Smuzhiyun }
2875*4882a593Smuzhiyun
2876*4882a593Smuzhiyun /**
2877*4882a593Smuzhiyun * megasas_generic_reset - Generic reset routine
2878*4882a593Smuzhiyun * @scmd: Mid-layer SCSI command
2879*4882a593Smuzhiyun *
2880*4882a593Smuzhiyun * This routine implements a generic reset handler for device, bus and host
2881*4882a593Smuzhiyun * reset requests. Device, bus and host specific reset handlers can use this
2882*4882a593Smuzhiyun * function after they do their specific tasks.
2883*4882a593Smuzhiyun */
2884*4882a593Smuzhiyun static int megasas_generic_reset(struct scsi_cmnd *scmd)
2885*4882a593Smuzhiyun {
2886*4882a593Smuzhiyun int ret_val;
2887*4882a593Smuzhiyun struct megasas_instance *instance;
2888*4882a593Smuzhiyun
2889*4882a593Smuzhiyun instance = (struct megasas_instance *)scmd->device->host->hostdata;
2890*4882a593Smuzhiyun
2891*4882a593Smuzhiyun scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2892*4882a593Smuzhiyun scmd->cmnd[0], scmd->retries);
2893*4882a593Smuzhiyun
2894*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2895*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2896*4882a593Smuzhiyun return FAILED;
2897*4882a593Smuzhiyun }
2898*4882a593Smuzhiyun
2899*4882a593Smuzhiyun ret_val = megasas_wait_for_outstanding(instance);
2900*4882a593Smuzhiyun if (ret_val == SUCCESS)
2901*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "reset successful\n");
2902*4882a593Smuzhiyun else
2903*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "failed to do reset\n");
2904*4882a593Smuzhiyun
2905*4882a593Smuzhiyun return ret_val;
2906*4882a593Smuzhiyun }
2907*4882a593Smuzhiyun
2908*4882a593Smuzhiyun /**
2909*4882a593Smuzhiyun * megasas_reset_timer - quiesce the adapter if required
2910*4882a593Smuzhiyun * @scmd: scsi cmnd
2911*4882a593Smuzhiyun *
2912*4882a593Smuzhiyun * Sets the FW busy flag and reduces the host->can_queue if the
2913*4882a593Smuzhiyun * cmd has not been completed within the timeout period.
2914*4882a593Smuzhiyun */
2915*4882a593Smuzhiyun static enum
2916*4882a593Smuzhiyun blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2917*4882a593Smuzhiyun {
2918*4882a593Smuzhiyun struct megasas_instance *instance;
2919*4882a593Smuzhiyun unsigned long flags;
2920*4882a593Smuzhiyun
2921*4882a593Smuzhiyun if (time_after(jiffies, scmd->jiffies_at_alloc +
2922*4882a593Smuzhiyun (scmd_timeout * 2) * HZ)) {
2923*4882a593Smuzhiyun return BLK_EH_DONE;
2924*4882a593Smuzhiyun }
2925*4882a593Smuzhiyun
2926*4882a593Smuzhiyun instance = (struct megasas_instance *)scmd->device->host->hostdata;
2927*4882a593Smuzhiyun if (!(instance->flag & MEGASAS_FW_BUSY)) {
2928*4882a593Smuzhiyun /* FW looks busy; throttle IO by shrinking can_queue until it drains */
2929*4882a593Smuzhiyun spin_lock_irqsave(instance->host->host_lock, flags);
2930*4882a593Smuzhiyun
2931*4882a593Smuzhiyun instance->host->can_queue = instance->throttlequeuedepth;
2932*4882a593Smuzhiyun instance->last_time = jiffies;
2933*4882a593Smuzhiyun instance->flag |= MEGASAS_FW_BUSY;
2934*4882a593Smuzhiyun
2935*4882a593Smuzhiyun spin_unlock_irqrestore(instance->host->host_lock, flags);
2936*4882a593Smuzhiyun }
2937*4882a593Smuzhiyun return BLK_EH_RESET_TIMER;
2938*4882a593Smuzhiyun }
2939*4882a593Smuzhiyun
2940*4882a593Smuzhiyun /**
2941*4882a593Smuzhiyun * megasas_dump - This function will print hexdump of provided buffer.
2942*4882a593Smuzhiyun * @buf: Buffer to be dumped
2943*4882a593Smuzhiyun * @sz: Size in bytes
2944*4882a593Smuzhiyun * @format: Different formats of dumping e.g. format=n will
2945*4882a593Smuzhiyun * cause only 'n' 32 bit words to be dumped in a single
2946*4882a593Smuzhiyun * line.
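 *
 * For example, megasas_dump(buf, 64, 8) prints sixteen 32-bit words,
 * eight words per output line.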
2947*4882a593Smuzhiyun */
2948*4882a593Smuzhiyun inline void
2949*4882a593Smuzhiyun megasas_dump(void *buf, int sz, int format)
2950*4882a593Smuzhiyun {
2951*4882a593Smuzhiyun int i;
2952*4882a593Smuzhiyun __le32 *buf_loc = (__le32 *)buf;
2953*4882a593Smuzhiyun
2954*4882a593Smuzhiyun for (i = 0; i < (sz / sizeof(__le32)); i++) {
2955*4882a593Smuzhiyun if ((i % format) == 0) {
2956*4882a593Smuzhiyun if (i != 0)
2957*4882a593Smuzhiyun printk(KERN_CONT "\n");
2958*4882a593Smuzhiyun printk(KERN_CONT "%08x: ", (i * 4));
2959*4882a593Smuzhiyun }
2960*4882a593Smuzhiyun printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
2961*4882a593Smuzhiyun }
2962*4882a593Smuzhiyun printk(KERN_CONT "\n");
2963*4882a593Smuzhiyun }
2964*4882a593Smuzhiyun
2965*4882a593Smuzhiyun /**
2966*4882a593Smuzhiyun * megasas_dump_reg_set - This function will print hexdump of register set
2967*4882a593Smuzhiyun * @reg_set: Register set to be dumped
2968*4882a593Smuzhiyun */
2969*4882a593Smuzhiyun inline void
2970*4882a593Smuzhiyun megasas_dump_reg_set(void __iomem *reg_set)
2971*4882a593Smuzhiyun {
2972*4882a593Smuzhiyun unsigned int i, sz = 256;
2973*4882a593Smuzhiyun u32 __iomem *reg = (u32 __iomem *)reg_set;
2974*4882a593Smuzhiyun
2975*4882a593Smuzhiyun for (i = 0; i < (sz / sizeof(u32)); i++)
2976*4882a593Smuzhiyun printk("%08x: %08x\n", (i * 4), readl(®[i]));
2977*4882a593Smuzhiyun }
2978*4882a593Smuzhiyun
2979*4882a593Smuzhiyun /**
2980*4882a593Smuzhiyun * megasas_dump_fusion_io - This function will print key details
2981*4882a593Smuzhiyun * of SCSI IO
2982*4882a593Smuzhiyun * @scmd: SCSI command pointer of SCSI IO
2983*4882a593Smuzhiyun */
2984*4882a593Smuzhiyun void
2985*4882a593Smuzhiyun megasas_dump_fusion_io(struct scsi_cmnd *scmd)
2986*4882a593Smuzhiyun {
2987*4882a593Smuzhiyun struct megasas_cmd_fusion *cmd;
2988*4882a593Smuzhiyun union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2989*4882a593Smuzhiyun struct megasas_instance *instance;
2990*4882a593Smuzhiyun
2991*4882a593Smuzhiyun cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2992*4882a593Smuzhiyun instance = (struct megasas_instance *)scmd->device->host->hostdata;
2993*4882a593Smuzhiyun
2994*4882a593Smuzhiyun scmd_printk(KERN_INFO, scmd,
2995*4882a593Smuzhiyun "scmd: (0x%p) retries: 0x%x allowed: 0x%x\n",
2996*4882a593Smuzhiyun scmd, scmd->retries, scmd->allowed);
2997*4882a593Smuzhiyun scsi_print_command(scmd);
2998*4882a593Smuzhiyun
2999*4882a593Smuzhiyun if (cmd) {
3000*4882a593Smuzhiyun req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
3001*4882a593Smuzhiyun scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
3002*4882a593Smuzhiyun scmd_printk(KERN_INFO, scmd,
3003*4882a593Smuzhiyun "RequestFlags:0x%x MSIxIndex:0x%x SMID:0x%x LMID:0x%x DevHandle:0x%x\n",
3004*4882a593Smuzhiyun req_desc->SCSIIO.RequestFlags,
3005*4882a593Smuzhiyun req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
3006*4882a593Smuzhiyun req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
3007*4882a593Smuzhiyun
3008*4882a593Smuzhiyun printk(KERN_INFO "IO request frame:\n");
3009*4882a593Smuzhiyun megasas_dump(cmd->io_request,
3010*4882a593Smuzhiyun MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
3011*4882a593Smuzhiyun printk(KERN_INFO "Chain frame:\n");
3012*4882a593Smuzhiyun megasas_dump(cmd->sg_frame,
3013*4882a593Smuzhiyun instance->max_chain_frame_sz, 8);
3014*4882a593Smuzhiyun }
3015*4882a593Smuzhiyun
3016*4882a593Smuzhiyun }
3017*4882a593Smuzhiyun
3018*4882a593Smuzhiyun /*
3019*4882a593Smuzhiyun * megasas_dump_sys_regs - This function will dump system registers through
3020*4882a593Smuzhiyun * sysfs.
3021*4882a593Smuzhiyun * @reg_set: Pointer to System register set.
3022*4882a593Smuzhiyun * @buf: Buffer to which output is to be written.
3023*4882a593Smuzhiyun * @return: Number of bytes written to buffer.
3024*4882a593Smuzhiyun */
3025*4882a593Smuzhiyun static inline ssize_t
3026*4882a593Smuzhiyun megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
3027*4882a593Smuzhiyun {
3028*4882a593Smuzhiyun unsigned int i, sz = 256;
3029*4882a593Smuzhiyun int bytes_wrote = 0;
3030*4882a593Smuzhiyun char *loc = (char *)buf;
3031*4882a593Smuzhiyun u32 __iomem *reg = (u32 __iomem *)reg_set;
3032*4882a593Smuzhiyun
3033*4882a593Smuzhiyun for (i = 0; i < sz / sizeof(u32); i++) {
3034*4882a593Smuzhiyun bytes_wrote += scnprintf(loc + bytes_wrote,
3035*4882a593Smuzhiyun PAGE_SIZE - bytes_wrote,
3036*4882a593Smuzhiyun "%08x: %08x\n", (i * 4),
3037*4882a593Smuzhiyun readl(&reg[i]));
3038*4882a593Smuzhiyun }
3039*4882a593Smuzhiyun return bytes_wrote;
3040*4882a593Smuzhiyun }
3041*4882a593Smuzhiyun
3042*4882a593Smuzhiyun /**
3043*4882a593Smuzhiyun * megasas_reset_bus_host - Bus & host reset handler entry point
3044*4882a593Smuzhiyun * @scmd: Mid-layer SCSI command
3045*4882a593Smuzhiyun */
3046*4882a593Smuzhiyun static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
3047*4882a593Smuzhiyun {
3048*4882a593Smuzhiyun int ret;
3049*4882a593Smuzhiyun struct megasas_instance *instance;
3050*4882a593Smuzhiyun
3051*4882a593Smuzhiyun instance = (struct megasas_instance *)scmd->device->host->hostdata;
3052*4882a593Smuzhiyun
3053*4882a593Smuzhiyun scmd_printk(KERN_INFO, scmd,
3054*4882a593Smuzhiyun "OCR is requested due to IO timeout!!\n");
3055*4882a593Smuzhiyun
3056*4882a593Smuzhiyun scmd_printk(KERN_INFO, scmd,
3057*4882a593Smuzhiyun "SCSI host state: %d SCSI host busy: %d FW outstanding: %d\n",
3058*4882a593Smuzhiyun scmd->device->host->shost_state,
3059*4882a593Smuzhiyun scsi_host_busy(scmd->device->host),
3060*4882a593Smuzhiyun atomic_read(&instance->fw_outstanding));
3061*4882a593Smuzhiyun /*
3062*4882a593Smuzhiyun * First wait for all commands to complete
3063*4882a593Smuzhiyun */
3064*4882a593Smuzhiyun if (instance->adapter_type == MFI_SERIES) {
3065*4882a593Smuzhiyun ret = megasas_generic_reset(scmd);
3066*4882a593Smuzhiyun } else {
3067*4882a593Smuzhiyun megasas_dump_fusion_io(scmd);
3068*4882a593Smuzhiyun ret = megasas_reset_fusion(scmd->device->host,
3069*4882a593Smuzhiyun SCSIIO_TIMEOUT_OCR);
3070*4882a593Smuzhiyun }
3071*4882a593Smuzhiyun
3072*4882a593Smuzhiyun return ret;
3073*4882a593Smuzhiyun }
3074*4882a593Smuzhiyun
3075*4882a593Smuzhiyun /**
3076*4882a593Smuzhiyun * megasas_task_abort - Issues task abort request to firmware
3077*4882a593Smuzhiyun * (supported only for fusion adapters)
3078*4882a593Smuzhiyun * @scmd: SCSI command pointer
3079*4882a593Smuzhiyun */
3080*4882a593Smuzhiyun static int megasas_task_abort(struct scsi_cmnd *scmd)
3081*4882a593Smuzhiyun {
3082*4882a593Smuzhiyun int ret;
3083*4882a593Smuzhiyun struct megasas_instance *instance;
3084*4882a593Smuzhiyun
3085*4882a593Smuzhiyun instance = (struct megasas_instance *)scmd->device->host->hostdata;
3086*4882a593Smuzhiyun
3087*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES)
3088*4882a593Smuzhiyun ret = megasas_task_abort_fusion(scmd);
3089*4882a593Smuzhiyun else {
3090*4882a593Smuzhiyun sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
3091*4882a593Smuzhiyun ret = FAILED;
3092*4882a593Smuzhiyun }
3093*4882a593Smuzhiyun
3094*4882a593Smuzhiyun return ret;
3095*4882a593Smuzhiyun }
3096*4882a593Smuzhiyun
3097*4882a593Smuzhiyun /**
3098*4882a593Smuzhiyun * megasas_reset_target: Issues target reset request to firmware
3099*4882a593Smuzhiyun * (supported only for fusion adapters)
3100*4882a593Smuzhiyun * @scmd: SCSI command pointer
3101*4882a593Smuzhiyun */
3102*4882a593Smuzhiyun static int megasas_reset_target(struct scsi_cmnd *scmd)
3103*4882a593Smuzhiyun {
3104*4882a593Smuzhiyun int ret;
3105*4882a593Smuzhiyun struct megasas_instance *instance;
3106*4882a593Smuzhiyun
3107*4882a593Smuzhiyun instance = (struct megasas_instance *)scmd->device->host->hostdata;
3108*4882a593Smuzhiyun
3109*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES)
3110*4882a593Smuzhiyun ret = megasas_reset_target_fusion(scmd);
3111*4882a593Smuzhiyun else {
3112*4882a593Smuzhiyun sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
3113*4882a593Smuzhiyun ret = FAILED;
3114*4882a593Smuzhiyun }
3115*4882a593Smuzhiyun
3116*4882a593Smuzhiyun return ret;
3117*4882a593Smuzhiyun }
3118*4882a593Smuzhiyun
3119*4882a593Smuzhiyun /**
3120*4882a593Smuzhiyun * megasas_bios_param - Returns disk geometry for a disk
3121*4882a593Smuzhiyun * @sdev: device handle
3122*4882a593Smuzhiyun * @bdev: block device
3123*4882a593Smuzhiyun * @capacity: drive capacity
3124*4882a593Smuzhiyun * @geom: geometry parameters
3125*4882a593Smuzhiyun */
3126*4882a593Smuzhiyun static int
3127*4882a593Smuzhiyun megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
3128*4882a593Smuzhiyun sector_t capacity, int geom[])
3129*4882a593Smuzhiyun {
3130*4882a593Smuzhiyun int heads;
3131*4882a593Smuzhiyun int sectors;
3132*4882a593Smuzhiyun sector_t cylinders;
3133*4882a593Smuzhiyun unsigned long tmp;
3134*4882a593Smuzhiyun
3135*4882a593Smuzhiyun /* Default heads (64) & sectors (32) */
3136*4882a593Smuzhiyun heads = 64;
3137*4882a593Smuzhiyun sectors = 32;
3138*4882a593Smuzhiyun
3139*4882a593Smuzhiyun tmp = heads * sectors;
3140*4882a593Smuzhiyun cylinders = capacity;
3141*4882a593Smuzhiyun
3142*4882a593Smuzhiyun sector_div(cylinders, tmp);
3143*4882a593Smuzhiyun
3144*4882a593Smuzhiyun /*
3145*4882a593Smuzhiyun * Handle extended translation size for logical drives > 1Gb
3146*4882a593Smuzhiyun */
3147*4882a593Smuzhiyun
3148*4882a593Smuzhiyun if (capacity >= 0x200000) {
3149*4882a593Smuzhiyun heads = 255;
3150*4882a593Smuzhiyun sectors = 63;
3151*4882a593Smuzhiyun tmp = heads*sectors;
3152*4882a593Smuzhiyun cylinders = capacity;
3153*4882a593Smuzhiyun sector_div(cylinders, tmp);
3154*4882a593Smuzhiyun }
3155*4882a593Smuzhiyun
3156*4882a593Smuzhiyun geom[0] = heads;
3157*4882a593Smuzhiyun geom[1] = sectors;
3158*4882a593Smuzhiyun geom[2] = cylinders;
3159*4882a593Smuzhiyun
3160*4882a593Smuzhiyun return 0;
3161*4882a593Smuzhiyun }
3162*4882a593Smuzhiyun
3163*4882a593Smuzhiyun static void megasas_aen_polling(struct work_struct *work);
3164*4882a593Smuzhiyun
3165*4882a593Smuzhiyun /**
3166*4882a593Smuzhiyun * megasas_service_aen - Processes an event notification
3167*4882a593Smuzhiyun * @instance: Adapter soft state
3168*4882a593Smuzhiyun * @cmd: AEN command completed by the ISR
3169*4882a593Smuzhiyun *
3170*4882a593Smuzhiyun * For AEN, driver sends a command down to FW that is held by the FW till an
3171*4882a593Smuzhiyun * event occurs. When an event of interest occurs, FW completes the command
3172*4882a593Smuzhiyun * that it was previously holding.
3173*4882a593Smuzhiyun *
3174*4882a593Smuzhiyun * This routine sends a SIGIO signal to processes that have registered with the
3175*4882a593Smuzhiyun * driver for AEN.
3176*4882a593Smuzhiyun */
3177*4882a593Smuzhiyun static void
3178*4882a593Smuzhiyun megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
3179*4882a593Smuzhiyun {
3180*4882a593Smuzhiyun unsigned long flags;
3181*4882a593Smuzhiyun
3182*4882a593Smuzhiyun /*
3183*4882a593Smuzhiyun * Don't signal app if it is just an aborted previously registered aen
3184*4882a593Smuzhiyun */
3185*4882a593Smuzhiyun if ((!cmd->abort_aen) && (instance->unload == 0)) {
3186*4882a593Smuzhiyun spin_lock_irqsave(&poll_aen_lock, flags);
3187*4882a593Smuzhiyun megasas_poll_wait_aen = 1;
3188*4882a593Smuzhiyun spin_unlock_irqrestore(&poll_aen_lock, flags);
3189*4882a593Smuzhiyun wake_up(&megasas_poll_wait);
3190*4882a593Smuzhiyun kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
3191*4882a593Smuzhiyun }
3192*4882a593Smuzhiyun else
3193*4882a593Smuzhiyun cmd->abort_aen = 0;
3194*4882a593Smuzhiyun
3195*4882a593Smuzhiyun instance->aen_cmd = NULL;
3196*4882a593Smuzhiyun
3197*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
3198*4882a593Smuzhiyun
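	/*
	 * If the driver is not unloading, queue the hotplug work so that
	 * megasas_aen_polling() can process the event and re-register the AEN.
	 */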
3199*4882a593Smuzhiyun if ((instance->unload == 0) &&
3200*4882a593Smuzhiyun ((instance->issuepend_done == 1))) {
3201*4882a593Smuzhiyun struct megasas_aen_event *ev;
3202*4882a593Smuzhiyun
3203*4882a593Smuzhiyun ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
3204*4882a593Smuzhiyun if (!ev) {
3205*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
3206*4882a593Smuzhiyun } else {
3207*4882a593Smuzhiyun ev->instance = instance;
3208*4882a593Smuzhiyun instance->ev = ev;
3209*4882a593Smuzhiyun INIT_DELAYED_WORK(&ev->hotplug_work,
3210*4882a593Smuzhiyun megasas_aen_polling);
3211*4882a593Smuzhiyun schedule_delayed_work(&ev->hotplug_work, 0);
3212*4882a593Smuzhiyun }
3213*4882a593Smuzhiyun }
3214*4882a593Smuzhiyun }
3215*4882a593Smuzhiyun
3216*4882a593Smuzhiyun static ssize_t
3217*4882a593Smuzhiyun fw_crash_buffer_store(struct device *cdev,
3218*4882a593Smuzhiyun struct device_attribute *attr, const char *buf, size_t count)
3219*4882a593Smuzhiyun {
3220*4882a593Smuzhiyun struct Scsi_Host *shost = class_to_shost(cdev);
3221*4882a593Smuzhiyun struct megasas_instance *instance =
3222*4882a593Smuzhiyun (struct megasas_instance *) shost->hostdata;
3223*4882a593Smuzhiyun int val = 0;
3224*4882a593Smuzhiyun unsigned long flags;
3225*4882a593Smuzhiyun
3226*4882a593Smuzhiyun if (kstrtoint(buf, 0, &val) != 0)
3227*4882a593Smuzhiyun return -EINVAL;
3228*4882a593Smuzhiyun
3229*4882a593Smuzhiyun spin_lock_irqsave(&instance->crashdump_lock, flags);
3230*4882a593Smuzhiyun instance->fw_crash_buffer_offset = val;
3231*4882a593Smuzhiyun spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3232*4882a593Smuzhiyun return strlen(buf);
3233*4882a593Smuzhiyun }
3234*4882a593Smuzhiyun
3235*4882a593Smuzhiyun static ssize_t
3236*4882a593Smuzhiyun fw_crash_buffer_show(struct device *cdev,
3237*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
3238*4882a593Smuzhiyun {
3239*4882a593Smuzhiyun struct Scsi_Host *shost = class_to_shost(cdev);
3240*4882a593Smuzhiyun struct megasas_instance *instance =
3241*4882a593Smuzhiyun (struct megasas_instance *) shost->hostdata;
3242*4882a593Smuzhiyun u32 size;
3243*4882a593Smuzhiyun unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3244*4882a593Smuzhiyun unsigned long chunk_left_bytes;
3245*4882a593Smuzhiyun unsigned long src_addr;
3246*4882a593Smuzhiyun unsigned long flags;
3247*4882a593Smuzhiyun u32 buff_offset;
3248*4882a593Smuzhiyun
3249*4882a593Smuzhiyun spin_lock_irqsave(&instance->crashdump_lock, flags);
3250*4882a593Smuzhiyun buff_offset = instance->fw_crash_buffer_offset;
3251*4882a593Smuzhiyun if (!instance->crash_dump_buf &&
3252*4882a593Smuzhiyun !((instance->fw_crash_state == AVAILABLE) ||
3253*4882a593Smuzhiyun (instance->fw_crash_state == COPYING))) {
3254*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
3255*4882a593Smuzhiyun "Firmware crash dump is not available\n");
3256*4882a593Smuzhiyun spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3257*4882a593Smuzhiyun return -EINVAL;
3258*4882a593Smuzhiyun }
3259*4882a593Smuzhiyun
3260*4882a593Smuzhiyun if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3261*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
3262*4882a593Smuzhiyun "Firmware crash dump offset is out of range\n");
3263*4882a593Smuzhiyun spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3264*4882a593Smuzhiyun return 0;
3265*4882a593Smuzhiyun }
3266*4882a593Smuzhiyun
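	/*
	 * The crash dump is kept in dmachunk-sized pieces; clamp the copy to
	 * what is left in the current chunk and to less than one page per read.
	 */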
3267*4882a593Smuzhiyun size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3268*4882a593Smuzhiyun chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
3269*4882a593Smuzhiyun size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
3270*4882a593Smuzhiyun size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3271*4882a593Smuzhiyun
3272*4882a593Smuzhiyun src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3273*4882a593Smuzhiyun (buff_offset % dmachunk);
3274*4882a593Smuzhiyun memcpy(buf, (void *)src_addr, size);
3275*4882a593Smuzhiyun spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3276*4882a593Smuzhiyun
3277*4882a593Smuzhiyun return size;
3278*4882a593Smuzhiyun }
3279*4882a593Smuzhiyun
3280*4882a593Smuzhiyun static ssize_t
3281*4882a593Smuzhiyun fw_crash_buffer_size_show(struct device *cdev,
3282*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
3283*4882a593Smuzhiyun {
3284*4882a593Smuzhiyun struct Scsi_Host *shost = class_to_shost(cdev);
3285*4882a593Smuzhiyun struct megasas_instance *instance =
3286*4882a593Smuzhiyun (struct megasas_instance *) shost->hostdata;
3287*4882a593Smuzhiyun
3288*4882a593Smuzhiyun return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3289*4882a593Smuzhiyun ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3290*4882a593Smuzhiyun }
3291*4882a593Smuzhiyun
3292*4882a593Smuzhiyun static ssize_t
3293*4882a593Smuzhiyun fw_crash_state_store(struct device *cdev,
3294*4882a593Smuzhiyun struct device_attribute *attr, const char *buf, size_t count)
3295*4882a593Smuzhiyun {
3296*4882a593Smuzhiyun struct Scsi_Host *shost = class_to_shost(cdev);
3297*4882a593Smuzhiyun struct megasas_instance *instance =
3298*4882a593Smuzhiyun (struct megasas_instance *) shost->hostdata;
3299*4882a593Smuzhiyun int val = 0;
3300*4882a593Smuzhiyun unsigned long flags;
3301*4882a593Smuzhiyun
3302*4882a593Smuzhiyun if (kstrtoint(buf, 0, &val) != 0)
3303*4882a593Smuzhiyun return -EINVAL;
3304*4882a593Smuzhiyun
3305*4882a593Smuzhiyun if ((val <= AVAILABLE || val > COPY_ERROR)) {
3306*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "application updates invalid "
3307*4882a593Smuzhiyun "firmware crash state\n");
3308*4882a593Smuzhiyun return -EINVAL;
3309*4882a593Smuzhiyun }
3310*4882a593Smuzhiyun
3311*4882a593Smuzhiyun instance->fw_crash_state = val;
3312*4882a593Smuzhiyun
3313*4882a593Smuzhiyun if ((val == COPIED) || (val == COPY_ERROR)) {
3314*4882a593Smuzhiyun spin_lock_irqsave(&instance->crashdump_lock, flags);
3315*4882a593Smuzhiyun megasas_free_host_crash_buffer(instance);
3316*4882a593Smuzhiyun spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3317*4882a593Smuzhiyun if (val == COPY_ERROR)
3318*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "application failed to "
3319*4882a593Smuzhiyun "copy Firmware crash dump\n");
3320*4882a593Smuzhiyun else
3321*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "Firmware crash dump "
3322*4882a593Smuzhiyun "copied successfully\n");
3323*4882a593Smuzhiyun }
3324*4882a593Smuzhiyun return strlen(buf);
3325*4882a593Smuzhiyun }
3326*4882a593Smuzhiyun
3327*4882a593Smuzhiyun static ssize_t
3328*4882a593Smuzhiyun fw_crash_state_show(struct device *cdev,
3329*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
3330*4882a593Smuzhiyun {
3331*4882a593Smuzhiyun struct Scsi_Host *shost = class_to_shost(cdev);
3332*4882a593Smuzhiyun struct megasas_instance *instance =
3333*4882a593Smuzhiyun (struct megasas_instance *) shost->hostdata;
3334*4882a593Smuzhiyun
3335*4882a593Smuzhiyun return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3336*4882a593Smuzhiyun }
3337*4882a593Smuzhiyun
3338*4882a593Smuzhiyun static ssize_t
3339*4882a593Smuzhiyun page_size_show(struct device *cdev,
3340*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
3341*4882a593Smuzhiyun {
3342*4882a593Smuzhiyun return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3343*4882a593Smuzhiyun }
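
/*
 * Illustrative only (an assumption, not code shipped with the driver): a
 * user-space crash-dump collector is expected to drive the attributes above
 * roughly as sketched below; the exact sysfs paths and read loop are
 * hypothetical, while the states written to fw_crash_state follow
 * fw_crash_state_store().
 *
 *	pages = read(".../fw_crash_buffer_size");   // dump size in PAGE_SIZE units
 *	max_chunk = read(".../page_size");          // a single read returns at most this
 *	loop: read fw_crash_buffer in chunks of at most max_chunk bytes;
 *	write COPIED (or COPY_ERROR on failure) to fw_crash_state, which lets
 *	fw_crash_state_store() free the host crash buffer.
 */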
3344*4882a593Smuzhiyun
3345*4882a593Smuzhiyun static ssize_t
3346*4882a593Smuzhiyun ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3347*4882a593Smuzhiyun char *buf)
3348*4882a593Smuzhiyun {
3349*4882a593Smuzhiyun struct Scsi_Host *shost = class_to_shost(cdev);
3350*4882a593Smuzhiyun struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3351*4882a593Smuzhiyun
3352*4882a593Smuzhiyun return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3353*4882a593Smuzhiyun }
3354*4882a593Smuzhiyun
3355*4882a593Smuzhiyun static ssize_t
3356*4882a593Smuzhiyun fw_cmds_outstanding_show(struct device *cdev,
3357*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
3358*4882a593Smuzhiyun {
3359*4882a593Smuzhiyun struct Scsi_Host *shost = class_to_shost(cdev);
3360*4882a593Smuzhiyun struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3361*4882a593Smuzhiyun
3362*4882a593Smuzhiyun return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
3363*4882a593Smuzhiyun }
3364*4882a593Smuzhiyun
3365*4882a593Smuzhiyun static ssize_t
3366*4882a593Smuzhiyun enable_sdev_max_qd_show(struct device *cdev,
3367*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
3368*4882a593Smuzhiyun {
3369*4882a593Smuzhiyun struct Scsi_Host *shost = class_to_shost(cdev);
3370*4882a593Smuzhiyun struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3371*4882a593Smuzhiyun
3372*4882a593Smuzhiyun return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd);
3373*4882a593Smuzhiyun }
3374*4882a593Smuzhiyun
3375*4882a593Smuzhiyun static ssize_t
3376*4882a593Smuzhiyun enable_sdev_max_qd_store(struct device *cdev,
3377*4882a593Smuzhiyun struct device_attribute *attr, const char *buf, size_t count)
3378*4882a593Smuzhiyun {
3379*4882a593Smuzhiyun struct Scsi_Host *shost = class_to_shost(cdev);
3380*4882a593Smuzhiyun struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3381*4882a593Smuzhiyun u32 val = 0;
3382*4882a593Smuzhiyun bool is_target_prop;
3383*4882a593Smuzhiyun int ret_target_prop = DCMD_FAILED;
3384*4882a593Smuzhiyun struct scsi_device *sdev;
3385*4882a593Smuzhiyun
3386*4882a593Smuzhiyun if (kstrtou32(buf, 0, &val) != 0) {
3387*4882a593Smuzhiyun pr_err("megasas: could not set enable_sdev_max_qd\n");
3388*4882a593Smuzhiyun return -EINVAL;
3389*4882a593Smuzhiyun }
3390*4882a593Smuzhiyun
3391*4882a593Smuzhiyun mutex_lock(&instance->reset_mutex);
3392*4882a593Smuzhiyun if (val)
3393*4882a593Smuzhiyun instance->enable_sdev_max_qd = true;
3394*4882a593Smuzhiyun else
3395*4882a593Smuzhiyun instance->enable_sdev_max_qd = false;
3396*4882a593Smuzhiyun
3397*4882a593Smuzhiyun shost_for_each_device(sdev, shost) {
3398*4882a593Smuzhiyun ret_target_prop = megasas_get_target_prop(instance, sdev);
3399*4882a593Smuzhiyun is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
3400*4882a593Smuzhiyun megasas_set_fw_assisted_qd(sdev, is_target_prop);
3401*4882a593Smuzhiyun }
3402*4882a593Smuzhiyun mutex_unlock(&instance->reset_mutex);
3403*4882a593Smuzhiyun
3404*4882a593Smuzhiyun return strlen(buf);
3405*4882a593Smuzhiyun }
3406*4882a593Smuzhiyun
3407*4882a593Smuzhiyun static ssize_t
3408*4882a593Smuzhiyun dump_system_regs_show(struct device *cdev,
3409*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
3410*4882a593Smuzhiyun {
3411*4882a593Smuzhiyun struct Scsi_Host *shost = class_to_shost(cdev);
3412*4882a593Smuzhiyun struct megasas_instance *instance =
3413*4882a593Smuzhiyun (struct megasas_instance *)shost->hostdata;
3414*4882a593Smuzhiyun
3415*4882a593Smuzhiyun return megasas_dump_sys_regs(instance->reg_set, buf);
3416*4882a593Smuzhiyun }
3417*4882a593Smuzhiyun
3418*4882a593Smuzhiyun static ssize_t
3419*4882a593Smuzhiyun raid_map_id_show(struct device *cdev, struct device_attribute *attr,
3420*4882a593Smuzhiyun char *buf)
3421*4882a593Smuzhiyun {
3422*4882a593Smuzhiyun struct Scsi_Host *shost = class_to_shost(cdev);
3423*4882a593Smuzhiyun struct megasas_instance *instance =
3424*4882a593Smuzhiyun (struct megasas_instance *)shost->hostdata;
3425*4882a593Smuzhiyun
3426*4882a593Smuzhiyun return snprintf(buf, PAGE_SIZE, "%ld\n",
3427*4882a593Smuzhiyun (unsigned long)instance->map_id);
3428*4882a593Smuzhiyun }
3429*4882a593Smuzhiyun
3430*4882a593Smuzhiyun static DEVICE_ATTR_RW(fw_crash_buffer);
3431*4882a593Smuzhiyun static DEVICE_ATTR_RO(fw_crash_buffer_size);
3432*4882a593Smuzhiyun static DEVICE_ATTR_RW(fw_crash_state);
3433*4882a593Smuzhiyun static DEVICE_ATTR_RO(page_size);
3434*4882a593Smuzhiyun static DEVICE_ATTR_RO(ldio_outstanding);
3435*4882a593Smuzhiyun static DEVICE_ATTR_RO(fw_cmds_outstanding);
3436*4882a593Smuzhiyun static DEVICE_ATTR_RW(enable_sdev_max_qd);
3437*4882a593Smuzhiyun static DEVICE_ATTR_RO(dump_system_regs);
3438*4882a593Smuzhiyun static DEVICE_ATTR_RO(raid_map_id);
3439*4882a593Smuzhiyun
3440*4882a593Smuzhiyun static struct device_attribute *megaraid_host_attrs[] = {
3441*4882a593Smuzhiyun &dev_attr_fw_crash_buffer_size,
3442*4882a593Smuzhiyun &dev_attr_fw_crash_buffer,
3443*4882a593Smuzhiyun &dev_attr_fw_crash_state,
3444*4882a593Smuzhiyun &dev_attr_page_size,
3445*4882a593Smuzhiyun &dev_attr_ldio_outstanding,
3446*4882a593Smuzhiyun &dev_attr_fw_cmds_outstanding,
3447*4882a593Smuzhiyun &dev_attr_enable_sdev_max_qd,
3448*4882a593Smuzhiyun &dev_attr_dump_system_regs,
3449*4882a593Smuzhiyun &dev_attr_raid_map_id,
3450*4882a593Smuzhiyun NULL,
3451*4882a593Smuzhiyun };
3452*4882a593Smuzhiyun
3453*4882a593Smuzhiyun /*
3454*4882a593Smuzhiyun * Scsi host template for megaraid_sas driver
3455*4882a593Smuzhiyun */
3456*4882a593Smuzhiyun static struct scsi_host_template megasas_template = {
3457*4882a593Smuzhiyun
3458*4882a593Smuzhiyun .module = THIS_MODULE,
3459*4882a593Smuzhiyun .name = "Avago SAS based MegaRAID driver",
3460*4882a593Smuzhiyun .proc_name = "megaraid_sas",
3461*4882a593Smuzhiyun .slave_configure = megasas_slave_configure,
3462*4882a593Smuzhiyun .slave_alloc = megasas_slave_alloc,
3463*4882a593Smuzhiyun .slave_destroy = megasas_slave_destroy,
3464*4882a593Smuzhiyun .queuecommand = megasas_queue_command,
3465*4882a593Smuzhiyun .eh_target_reset_handler = megasas_reset_target,
3466*4882a593Smuzhiyun .eh_abort_handler = megasas_task_abort,
3467*4882a593Smuzhiyun .eh_host_reset_handler = megasas_reset_bus_host,
3468*4882a593Smuzhiyun .eh_timed_out = megasas_reset_timer,
3469*4882a593Smuzhiyun .shost_attrs = megaraid_host_attrs,
3470*4882a593Smuzhiyun .bios_param = megasas_bios_param,
3471*4882a593Smuzhiyun .change_queue_depth = scsi_change_queue_depth,
3472*4882a593Smuzhiyun .max_segment_size = 0xffffffff,
3473*4882a593Smuzhiyun };
3474*4882a593Smuzhiyun
3475*4882a593Smuzhiyun /**
3476*4882a593Smuzhiyun * megasas_complete_int_cmd - Completes an internal command
3477*4882a593Smuzhiyun * @instance: Adapter soft state
3478*4882a593Smuzhiyun * @cmd: Command to be completed
3479*4882a593Smuzhiyun *
3480*4882a593Smuzhiyun * The megasas_issue_blocked_cmd() function waits for a command to complete
3481*4882a593Smuzhiyun * after it issues a command. This function wakes up that waiting routine by
3482*4882a593Smuzhiyun * calling wake_up() on the wait queue.
3483*4882a593Smuzhiyun */
3484*4882a593Smuzhiyun static void
3485*4882a593Smuzhiyun megasas_complete_int_cmd(struct megasas_instance *instance,
3486*4882a593Smuzhiyun struct megasas_cmd *cmd)
3487*4882a593Smuzhiyun {
3488*4882a593Smuzhiyun if (cmd->cmd_status_drv == DCMD_INIT)
3489*4882a593Smuzhiyun cmd->cmd_status_drv =
3490*4882a593Smuzhiyun (cmd->frame->io.cmd_status == MFI_STAT_OK) ?
3491*4882a593Smuzhiyun DCMD_SUCCESS : DCMD_FAILED;
3492*4882a593Smuzhiyun
3493*4882a593Smuzhiyun wake_up(&instance->int_cmd_wait_q);
3494*4882a593Smuzhiyun }
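
/*
 * Illustrative sketch (not copied from the driver): the blocked-command
 * issuer that the wake_up() above pairs with waits roughly like the
 * fragment below; the exact predicate and timeout handling used by
 * megasas_issue_blocked_cmd() are assumptions here.
 *
 *	cmd->cmd_status_drv = DCMD_INIT;
 *	// ...hand the frame to the firmware...
 *	wait_event_timeout(instance->int_cmd_wait_q,
 *			   cmd->cmd_status_drv != DCMD_INIT,
 *			   timeout * HZ);
 */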
3495*4882a593Smuzhiyun
3496*4882a593Smuzhiyun /**
3497*4882a593Smuzhiyun * megasas_complete_abort - Completes aborting a command
3498*4882a593Smuzhiyun * @instance: Adapter soft state
3499*4882a593Smuzhiyun * @cmd: Cmd that was issued to abort another cmd
3500*4882a593Smuzhiyun *
3501*4882a593Smuzhiyun * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3502*4882a593Smuzhiyun * after it issues an abort on a previously issued command. This function
3503*4882a593Smuzhiyun * wakes up all functions waiting on the same wait queue.
3504*4882a593Smuzhiyun */
3505*4882a593Smuzhiyun static void
3506*4882a593Smuzhiyun megasas_complete_abort(struct megasas_instance *instance,
3507*4882a593Smuzhiyun struct megasas_cmd *cmd)
3508*4882a593Smuzhiyun {
3509*4882a593Smuzhiyun if (cmd->sync_cmd) {
3510*4882a593Smuzhiyun cmd->sync_cmd = 0;
3511*4882a593Smuzhiyun cmd->cmd_status_drv = DCMD_SUCCESS;
3512*4882a593Smuzhiyun wake_up(&instance->abort_cmd_wait_q);
3513*4882a593Smuzhiyun }
3514*4882a593Smuzhiyun }
3515*4882a593Smuzhiyun
3516*4882a593Smuzhiyun static void
3517*4882a593Smuzhiyun megasas_set_ld_removed_by_fw(struct megasas_instance *instance)
3518*4882a593Smuzhiyun {
3519*4882a593Smuzhiyun uint i;
3520*4882a593Smuzhiyun
3521*4882a593Smuzhiyun for (i = 0; (i < MEGASAS_MAX_LD_IDS); i++) {
3522*4882a593Smuzhiyun if (instance->ld_ids_prev[i] != 0xff &&
3523*4882a593Smuzhiyun instance->ld_ids_from_raidmap[i] == 0xff) {
3524*4882a593Smuzhiyun if (megasas_dbg_lvl & LD_PD_DEBUG)
3525*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
3526*4882a593Smuzhiyun "LD target ID %d removed from RAID map\n", i);
3527*4882a593Smuzhiyun instance->ld_tgtid_status[i] = LD_TARGET_ID_DELETED;
3528*4882a593Smuzhiyun }
3529*4882a593Smuzhiyun }
3530*4882a593Smuzhiyun }
3531*4882a593Smuzhiyun
3532*4882a593Smuzhiyun /**
3533*4882a593Smuzhiyun * megasas_complete_cmd - Completes a command
3534*4882a593Smuzhiyun * @instance: Adapter soft state
3535*4882a593Smuzhiyun * @cmd: Command to be completed
3536*4882a593Smuzhiyun * @alt_status: If non-zero, use this value as status to
3537*4882a593Smuzhiyun * SCSI mid-layer instead of the value returned
3538*4882a593Smuzhiyun * by the FW. This should be used if caller wants
3539*4882a593Smuzhiyun * an alternate status (as in the case of aborted
3540*4882a593Smuzhiyun * commands)
3541*4882a593Smuzhiyun */
3542*4882a593Smuzhiyun void
3543*4882a593Smuzhiyun megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3544*4882a593Smuzhiyun u8 alt_status)
3545*4882a593Smuzhiyun {
3546*4882a593Smuzhiyun int exception = 0;
3547*4882a593Smuzhiyun struct megasas_header *hdr = &cmd->frame->hdr;
3548*4882a593Smuzhiyun unsigned long flags;
3549*4882a593Smuzhiyun struct fusion_context *fusion = instance->ctrl_context;
3550*4882a593Smuzhiyun u32 opcode, status;
3551*4882a593Smuzhiyun
3552*4882a593Smuzhiyun /* flag for the retry reset */
3553*4882a593Smuzhiyun cmd->retry_for_fw_reset = 0;
3554*4882a593Smuzhiyun
3555*4882a593Smuzhiyun if (cmd->scmd)
3556*4882a593Smuzhiyun cmd->scmd->SCp.ptr = NULL;
3557*4882a593Smuzhiyun
3558*4882a593Smuzhiyun switch (hdr->cmd) {
3559*4882a593Smuzhiyun case MFI_CMD_INVALID:
3560*4882a593Smuzhiyun /* Some older 1068 controller FW may keep a pending
3561*4882a593Smuzhiyun MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3562*4882a593Smuzhiyun when booting the kdump kernel. Ignore this command to
3563*4882a593Smuzhiyun prevent a kernel panic on shutdown of the kdump kernel. */
3564*4882a593Smuzhiyun dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3565*4882a593Smuzhiyun "completed\n");
3566*4882a593Smuzhiyun dev_warn(&instance->pdev->dev, "If you have a controller "
3567*4882a593Smuzhiyun "other than PERC5, please upgrade your firmware\n");
3568*4882a593Smuzhiyun break;
3569*4882a593Smuzhiyun case MFI_CMD_PD_SCSI_IO:
3570*4882a593Smuzhiyun case MFI_CMD_LD_SCSI_IO:
3571*4882a593Smuzhiyun
3572*4882a593Smuzhiyun /*
3573*4882a593Smuzhiyun * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3574*4882a593Smuzhiyun * issued either through an IO path or an IOCTL path. If it
3575*4882a593Smuzhiyun * was via IOCTL, we will send it to internal completion.
3576*4882a593Smuzhiyun */
3577*4882a593Smuzhiyun if (cmd->sync_cmd) {
3578*4882a593Smuzhiyun cmd->sync_cmd = 0;
3579*4882a593Smuzhiyun megasas_complete_int_cmd(instance, cmd);
3580*4882a593Smuzhiyun break;
3581*4882a593Smuzhiyun }
3582*4882a593Smuzhiyun fallthrough;
3583*4882a593Smuzhiyun
3584*4882a593Smuzhiyun case MFI_CMD_LD_READ:
3585*4882a593Smuzhiyun case MFI_CMD_LD_WRITE:
3586*4882a593Smuzhiyun
3587*4882a593Smuzhiyun if (alt_status) {
3588*4882a593Smuzhiyun cmd->scmd->result = alt_status << 16;
3589*4882a593Smuzhiyun exception = 1;
3590*4882a593Smuzhiyun }
3591*4882a593Smuzhiyun
3592*4882a593Smuzhiyun if (exception) {
3593*4882a593Smuzhiyun
3594*4882a593Smuzhiyun atomic_dec(&instance->fw_outstanding);
3595*4882a593Smuzhiyun
3596*4882a593Smuzhiyun scsi_dma_unmap(cmd->scmd);
3597*4882a593Smuzhiyun cmd->scmd->scsi_done(cmd->scmd);
3598*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
3599*4882a593Smuzhiyun
3600*4882a593Smuzhiyun break;
3601*4882a593Smuzhiyun }
3602*4882a593Smuzhiyun
3603*4882a593Smuzhiyun switch (hdr->cmd_status) {
3604*4882a593Smuzhiyun
3605*4882a593Smuzhiyun case MFI_STAT_OK:
3606*4882a593Smuzhiyun cmd->scmd->result = DID_OK << 16;
3607*4882a593Smuzhiyun break;
3608*4882a593Smuzhiyun
3609*4882a593Smuzhiyun case MFI_STAT_SCSI_IO_FAILED:
3610*4882a593Smuzhiyun case MFI_STAT_LD_INIT_IN_PROGRESS:
3611*4882a593Smuzhiyun cmd->scmd->result =
3612*4882a593Smuzhiyun (DID_ERROR << 16) | hdr->scsi_status;
3613*4882a593Smuzhiyun break;
3614*4882a593Smuzhiyun
3615*4882a593Smuzhiyun case MFI_STAT_SCSI_DONE_WITH_ERROR:
3616*4882a593Smuzhiyun
3617*4882a593Smuzhiyun cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3618*4882a593Smuzhiyun
3619*4882a593Smuzhiyun if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3620*4882a593Smuzhiyun memset(cmd->scmd->sense_buffer, 0,
3621*4882a593Smuzhiyun SCSI_SENSE_BUFFERSIZE);
3622*4882a593Smuzhiyun memcpy(cmd->scmd->sense_buffer, cmd->sense,
3623*4882a593Smuzhiyun hdr->sense_len);
3624*4882a593Smuzhiyun
3625*4882a593Smuzhiyun cmd->scmd->result |= DRIVER_SENSE << 24;
3626*4882a593Smuzhiyun }
3627*4882a593Smuzhiyun
3628*4882a593Smuzhiyun break;
3629*4882a593Smuzhiyun
3630*4882a593Smuzhiyun case MFI_STAT_LD_OFFLINE:
3631*4882a593Smuzhiyun case MFI_STAT_DEVICE_NOT_FOUND:
3632*4882a593Smuzhiyun cmd->scmd->result = DID_BAD_TARGET << 16;
3633*4882a593Smuzhiyun break;
3634*4882a593Smuzhiyun
3635*4882a593Smuzhiyun default:
3636*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3637*4882a593Smuzhiyun hdr->cmd_status);
3638*4882a593Smuzhiyun cmd->scmd->result = DID_ERROR << 16;
3639*4882a593Smuzhiyun break;
3640*4882a593Smuzhiyun }
3641*4882a593Smuzhiyun
3642*4882a593Smuzhiyun atomic_dec(&instance->fw_outstanding);
3643*4882a593Smuzhiyun
3644*4882a593Smuzhiyun scsi_dma_unmap(cmd->scmd);
3645*4882a593Smuzhiyun cmd->scmd->scsi_done(cmd->scmd);
3646*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
3647*4882a593Smuzhiyun
3648*4882a593Smuzhiyun break;
3649*4882a593Smuzhiyun
3650*4882a593Smuzhiyun case MFI_CMD_SMP:
3651*4882a593Smuzhiyun case MFI_CMD_STP:
3652*4882a593Smuzhiyun case MFI_CMD_NVME:
3653*4882a593Smuzhiyun case MFI_CMD_TOOLBOX:
3654*4882a593Smuzhiyun megasas_complete_int_cmd(instance, cmd);
3655*4882a593Smuzhiyun break;
3656*4882a593Smuzhiyun
3657*4882a593Smuzhiyun case MFI_CMD_DCMD:
3658*4882a593Smuzhiyun opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3659*4882a593Smuzhiyun /* Check for LD map update */
3660*4882a593Smuzhiyun if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3661*4882a593Smuzhiyun && (cmd->frame->dcmd.mbox.b[1] == 1)) {
3662*4882a593Smuzhiyun fusion->fast_path_io = 0;
3663*4882a593Smuzhiyun spin_lock_irqsave(instance->host->host_lock, flags);
3664*4882a593Smuzhiyun status = cmd->frame->hdr.cmd_status;
3665*4882a593Smuzhiyun instance->map_update_cmd = NULL;
3666*4882a593Smuzhiyun if (status != MFI_STAT_OK) {
3667*4882a593Smuzhiyun if (status != MFI_STAT_NOT_FOUND)
3668*4882a593Smuzhiyun dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3669*4882a593Smuzhiyun cmd->frame->hdr.cmd_status);
3670*4882a593Smuzhiyun else {
3671*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
3672*4882a593Smuzhiyun spin_unlock_irqrestore(
3673*4882a593Smuzhiyun instance->host->host_lock,
3674*4882a593Smuzhiyun flags);
3675*4882a593Smuzhiyun break;
3676*4882a593Smuzhiyun }
3677*4882a593Smuzhiyun }
3678*4882a593Smuzhiyun
3679*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
3680*4882a593Smuzhiyun
3681*4882a593Smuzhiyun /*
3682*4882a593Smuzhiyun * Set fast path IO to ZERO.
3683*4882a593Smuzhiyun * Validate Map will set proper value.
3684*4882a593Smuzhiyun * Meanwhile all IOs will go as LD IO.
3685*4882a593Smuzhiyun */
3686*4882a593Smuzhiyun if (status == MFI_STAT_OK &&
3687*4882a593Smuzhiyun (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
3688*4882a593Smuzhiyun instance->map_id++;
3689*4882a593Smuzhiyun fusion->fast_path_io = 1;
3690*4882a593Smuzhiyun } else {
3691*4882a593Smuzhiyun fusion->fast_path_io = 0;
3692*4882a593Smuzhiyun }
3693*4882a593Smuzhiyun
3694*4882a593Smuzhiyun if (instance->adapter_type >= INVADER_SERIES)
3695*4882a593Smuzhiyun megasas_set_ld_removed_by_fw(instance);
3696*4882a593Smuzhiyun
3697*4882a593Smuzhiyun megasas_sync_map_info(instance);
3698*4882a593Smuzhiyun spin_unlock_irqrestore(instance->host->host_lock,
3699*4882a593Smuzhiyun flags);
3700*4882a593Smuzhiyun
3701*4882a593Smuzhiyun break;
3702*4882a593Smuzhiyun }
3703*4882a593Smuzhiyun if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3704*4882a593Smuzhiyun opcode == MR_DCMD_CTRL_EVENT_GET) {
3705*4882a593Smuzhiyun spin_lock_irqsave(&poll_aen_lock, flags);
3706*4882a593Smuzhiyun megasas_poll_wait_aen = 0;
3707*4882a593Smuzhiyun spin_unlock_irqrestore(&poll_aen_lock, flags);
3708*4882a593Smuzhiyun }
3709*4882a593Smuzhiyun
3710*4882a593Smuzhiyun /* FW has an updated PD sequence */
3711*4882a593Smuzhiyun if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3712*4882a593Smuzhiyun (cmd->frame->dcmd.mbox.b[0] == 1)) {
3713*4882a593Smuzhiyun
3714*4882a593Smuzhiyun spin_lock_irqsave(instance->host->host_lock, flags);
3715*4882a593Smuzhiyun status = cmd->frame->hdr.cmd_status;
3716*4882a593Smuzhiyun instance->jbod_seq_cmd = NULL;
3717*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
3718*4882a593Smuzhiyun
3719*4882a593Smuzhiyun if (status == MFI_STAT_OK) {
3720*4882a593Smuzhiyun instance->pd_seq_map_id++;
3721*4882a593Smuzhiyun /* Re-register a pd sync seq num cmd */
3722*4882a593Smuzhiyun if (megasas_sync_pd_seq_num(instance, true))
3723*4882a593Smuzhiyun instance->use_seqnum_jbod_fp = false;
3724*4882a593Smuzhiyun } else
3725*4882a593Smuzhiyun instance->use_seqnum_jbod_fp = false;
3726*4882a593Smuzhiyun
3727*4882a593Smuzhiyun spin_unlock_irqrestore(instance->host->host_lock, flags);
3728*4882a593Smuzhiyun break;
3729*4882a593Smuzhiyun }
3730*4882a593Smuzhiyun
3731*4882a593Smuzhiyun /*
3732*4882a593Smuzhiyun * See if got an event notification
3733*4882a593Smuzhiyun */
3734*4882a593Smuzhiyun if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3735*4882a593Smuzhiyun megasas_service_aen(instance, cmd);
3736*4882a593Smuzhiyun else
3737*4882a593Smuzhiyun megasas_complete_int_cmd(instance, cmd);
3738*4882a593Smuzhiyun
3739*4882a593Smuzhiyun break;
3740*4882a593Smuzhiyun
3741*4882a593Smuzhiyun case MFI_CMD_ABORT:
3742*4882a593Smuzhiyun /*
3743*4882a593Smuzhiyun * Cmd issued to abort another cmd returned
3744*4882a593Smuzhiyun */
3745*4882a593Smuzhiyun megasas_complete_abort(instance, cmd);
3746*4882a593Smuzhiyun break;
3747*4882a593Smuzhiyun
3748*4882a593Smuzhiyun default:
3749*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3750*4882a593Smuzhiyun hdr->cmd);
3751*4882a593Smuzhiyun megasas_complete_int_cmd(instance, cmd);
3752*4882a593Smuzhiyun break;
3753*4882a593Smuzhiyun }
3754*4882a593Smuzhiyun }
3755*4882a593Smuzhiyun
3756*4882a593Smuzhiyun /**
3757*4882a593Smuzhiyun * megasas_issue_pending_cmds_again - issue all pending cmds
3758*4882a593Smuzhiyun * in FW again because of the fw reset
3759*4882a593Smuzhiyun * @instance: Adapter soft state
3760*4882a593Smuzhiyun */
3761*4882a593Smuzhiyun static inline void
3762*4882a593Smuzhiyun megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3763*4882a593Smuzhiyun {
3764*4882a593Smuzhiyun struct megasas_cmd *cmd;
3765*4882a593Smuzhiyun struct list_head clist_local;
3766*4882a593Smuzhiyun union megasas_evt_class_locale class_locale;
3767*4882a593Smuzhiyun unsigned long flags;
3768*4882a593Smuzhiyun u32 seq_num;
3769*4882a593Smuzhiyun
3770*4882a593Smuzhiyun INIT_LIST_HEAD(&clist_local);
3771*4882a593Smuzhiyun spin_lock_irqsave(&instance->hba_lock, flags);
3772*4882a593Smuzhiyun list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3773*4882a593Smuzhiyun spin_unlock_irqrestore(&instance->hba_lock, flags);
3774*4882a593Smuzhiyun
3775*4882a593Smuzhiyun while (!list_empty(&clist_local)) {
3776*4882a593Smuzhiyun cmd = list_entry((&clist_local)->next,
3777*4882a593Smuzhiyun struct megasas_cmd, list);
3778*4882a593Smuzhiyun list_del_init(&cmd->list);
3779*4882a593Smuzhiyun
3780*4882a593Smuzhiyun if (cmd->sync_cmd || cmd->scmd) {
3781*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "command %p, %p:%d "
3782*4882a593Smuzhiyun "detected to be pending while HBA reset\n",
3783*4882a593Smuzhiyun cmd, cmd->scmd, cmd->sync_cmd);
3784*4882a593Smuzhiyun
3785*4882a593Smuzhiyun cmd->retry_for_fw_reset++;
3786*4882a593Smuzhiyun
3787*4882a593Smuzhiyun if (cmd->retry_for_fw_reset == 3) {
3788*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "cmd %p, %p:%d "
3789*4882a593Smuzhiyun "was tried multiple times during reset. "
3790*4882a593Smuzhiyun "Shutting down the HBA\n",
3791*4882a593Smuzhiyun cmd, cmd->scmd, cmd->sync_cmd);
3792*4882a593Smuzhiyun instance->instancet->disable_intr(instance);
3793*4882a593Smuzhiyun atomic_set(&instance->fw_reset_no_pci_access, 1);
3794*4882a593Smuzhiyun megaraid_sas_kill_hba(instance);
3795*4882a593Smuzhiyun return;
3796*4882a593Smuzhiyun }
3797*4882a593Smuzhiyun }
3798*4882a593Smuzhiyun
3799*4882a593Smuzhiyun if (cmd->sync_cmd == 1) {
3800*4882a593Smuzhiyun if (cmd->scmd) {
3801*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "unexpected "
3802*4882a593Smuzhiyun "cmd attached to internal command!\n");
3803*4882a593Smuzhiyun }
3804*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "%p synchronous cmd "
3805*4882a593Smuzhiyun "on the internal reset queue, "
3806*4882a593Smuzhiyun "issue it again.\n", cmd);
3807*4882a593Smuzhiyun cmd->cmd_status_drv = DCMD_INIT;
3808*4882a593Smuzhiyun instance->instancet->fire_cmd(instance,
3809*4882a593Smuzhiyun cmd->frame_phys_addr,
3810*4882a593Smuzhiyun 0, instance->reg_set);
3811*4882a593Smuzhiyun } else if (cmd->scmd) {
3812*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x] "
3813*4882a593Smuzhiyun "detected on the internal queue, issue again.\n",
3814*4882a593Smuzhiyun cmd, cmd->scmd->cmnd[0]);
3815*4882a593Smuzhiyun
3816*4882a593Smuzhiyun atomic_inc(&instance->fw_outstanding);
3817*4882a593Smuzhiyun instance->instancet->fire_cmd(instance,
3818*4882a593Smuzhiyun cmd->frame_phys_addr,
3819*4882a593Smuzhiyun cmd->frame_count-1, instance->reg_set);
3820*4882a593Smuzhiyun } else {
3821*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "%p unexpected cmd on the "
3822*4882a593Smuzhiyun "internal reset defer list while re-issue!!\n",
3823*4882a593Smuzhiyun cmd);
3824*4882a593Smuzhiyun }
3825*4882a593Smuzhiyun }
3826*4882a593Smuzhiyun
3827*4882a593Smuzhiyun if (instance->aen_cmd) {
3828*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3829*4882a593Smuzhiyun megasas_return_cmd(instance, instance->aen_cmd);
3830*4882a593Smuzhiyun
3831*4882a593Smuzhiyun instance->aen_cmd = NULL;
3832*4882a593Smuzhiyun }
3833*4882a593Smuzhiyun
3834*4882a593Smuzhiyun /*
3835*4882a593Smuzhiyun * Initiate AEN (Asynchronous Event Notification)
3836*4882a593Smuzhiyun */
3837*4882a593Smuzhiyun seq_num = instance->last_seq_num;
3838*4882a593Smuzhiyun class_locale.members.reserved = 0;
3839*4882a593Smuzhiyun class_locale.members.locale = MR_EVT_LOCALE_ALL;
3840*4882a593Smuzhiyun class_locale.members.class = MR_EVT_CLASS_DEBUG;
3841*4882a593Smuzhiyun
3842*4882a593Smuzhiyun megasas_register_aen(instance, seq_num, class_locale.word);
3843*4882a593Smuzhiyun }
3844*4882a593Smuzhiyun
3845*4882a593Smuzhiyun /*
3846*4882a593Smuzhiyun * Move the internal reset pending commands to a deferred queue.
3847*4882a593Smuzhiyun *
3848*4882a593Smuzhiyun * We move the commands pending at internal reset time to a
3849*4882a593Smuzhiyun * pending queue. This queue would be flushed after successful
3850*4882a593Smuzhiyun * completion of the internal reset sequence. if the internal reset
3851*4882a593Smuzhiyun * did not complete in time, the kernel reset handler would flush
3852*4882a593Smuzhiyun * these commands.
3853*4882a593Smuzhiyun */
3854*4882a593Smuzhiyun static void
3855*4882a593Smuzhiyun megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3856*4882a593Smuzhiyun {
3857*4882a593Smuzhiyun struct megasas_cmd *cmd;
3858*4882a593Smuzhiyun int i;
3859*4882a593Smuzhiyun u16 max_cmd = instance->max_fw_cmds;
3860*4882a593Smuzhiyun u32 defer_index;
3861*4882a593Smuzhiyun unsigned long flags;
3862*4882a593Smuzhiyun
3863*4882a593Smuzhiyun defer_index = 0;
3864*4882a593Smuzhiyun spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3865*4882a593Smuzhiyun for (i = 0; i < max_cmd; i++) {
3866*4882a593Smuzhiyun cmd = instance->cmd_list[i];
3867*4882a593Smuzhiyun if (cmd->sync_cmd == 1 || cmd->scmd) {
3868*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p "
3869*4882a593Smuzhiyun "on the defer queue as internal\n",
3870*4882a593Smuzhiyun defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3871*4882a593Smuzhiyun
3872*4882a593Smuzhiyun if (!list_empty(&cmd->list)) {
3873*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "ERROR while"
3874*4882a593Smuzhiyun " moving this cmd:%p, %d %p, it was"
3875*4882a593Smuzhiyun "discovered on some list?\n",
3876*4882a593Smuzhiyun cmd, cmd->sync_cmd, cmd->scmd);
3877*4882a593Smuzhiyun
3878*4882a593Smuzhiyun list_del_init(&cmd->list);
3879*4882a593Smuzhiyun }
3880*4882a593Smuzhiyun defer_index++;
3881*4882a593Smuzhiyun list_add_tail(&cmd->list,
3882*4882a593Smuzhiyun &instance->internal_reset_pending_q);
3883*4882a593Smuzhiyun }
3884*4882a593Smuzhiyun }
3885*4882a593Smuzhiyun spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3886*4882a593Smuzhiyun }
3887*4882a593Smuzhiyun
3888*4882a593Smuzhiyun
3889*4882a593Smuzhiyun static void
3890*4882a593Smuzhiyun process_fw_state_change_wq(struct work_struct *work)
3891*4882a593Smuzhiyun {
3892*4882a593Smuzhiyun struct megasas_instance *instance =
3893*4882a593Smuzhiyun container_of(work, struct megasas_instance, work_init);
3894*4882a593Smuzhiyun u32 wait;
3895*4882a593Smuzhiyun unsigned long flags;
3896*4882a593Smuzhiyun
3897*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
3898*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
3899*4882a593Smuzhiyun atomic_read(&instance->adprecovery));
3900*4882a593Smuzhiyun return ;
3901*4882a593Smuzhiyun }
3902*4882a593Smuzhiyun
3903*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3904*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "FW detected to be in fault "
3905*4882a593Smuzhiyun "state, restarting it...\n");
3906*4882a593Smuzhiyun
3907*4882a593Smuzhiyun instance->instancet->disable_intr(instance);
3908*4882a593Smuzhiyun atomic_set(&instance->fw_outstanding, 0);
3909*4882a593Smuzhiyun
3910*4882a593Smuzhiyun atomic_set(&instance->fw_reset_no_pci_access, 1);
3911*4882a593Smuzhiyun instance->instancet->adp_reset(instance, instance->reg_set);
3912*4882a593Smuzhiyun atomic_set(&instance->fw_reset_no_pci_access, 0);
3913*4882a593Smuzhiyun
3914*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "FW restarted successfully, "
3915*4882a593Smuzhiyun "initiating next stage...\n");
3916*4882a593Smuzhiyun
3917*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "HBA recovery state machine, "
3918*4882a593Smuzhiyun "state 2 starting...\n");
3919*4882a593Smuzhiyun
3920*4882a593Smuzhiyun /* wait about 30 seconds before starting the second init stage */
3921*4882a593Smuzhiyun for (wait = 0; wait < 30; wait++) {
3922*4882a593Smuzhiyun msleep(1000);
3923*4882a593Smuzhiyun }
3924*4882a593Smuzhiyun
3925*4882a593Smuzhiyun if (megasas_transition_to_ready(instance, 1)) {
3926*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "adapter not ready\n");
3927*4882a593Smuzhiyun
3928*4882a593Smuzhiyun atomic_set(&instance->fw_reset_no_pci_access, 1);
3929*4882a593Smuzhiyun megaraid_sas_kill_hba(instance);
3930*4882a593Smuzhiyun return ;
3931*4882a593Smuzhiyun }
3932*4882a593Smuzhiyun
3933*4882a593Smuzhiyun if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3934*4882a593Smuzhiyun (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3935*4882a593Smuzhiyun (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3936*4882a593Smuzhiyun ) {
3937*4882a593Smuzhiyun *instance->consumer = *instance->producer;
3938*4882a593Smuzhiyun } else {
3939*4882a593Smuzhiyun *instance->consumer = 0;
3940*4882a593Smuzhiyun *instance->producer = 0;
3941*4882a593Smuzhiyun }
3942*4882a593Smuzhiyun
3943*4882a593Smuzhiyun megasas_issue_init_mfi(instance);
3944*4882a593Smuzhiyun
3945*4882a593Smuzhiyun spin_lock_irqsave(&instance->hba_lock, flags);
3946*4882a593Smuzhiyun atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3947*4882a593Smuzhiyun spin_unlock_irqrestore(&instance->hba_lock, flags);
3948*4882a593Smuzhiyun instance->instancet->enable_intr(instance);
3949*4882a593Smuzhiyun
3950*4882a593Smuzhiyun megasas_issue_pending_cmds_again(instance);
3951*4882a593Smuzhiyun instance->issuepend_done = 1;
3952*4882a593Smuzhiyun }
3953*4882a593Smuzhiyun }
3954*4882a593Smuzhiyun
3955*4882a593Smuzhiyun /**
3956*4882a593Smuzhiyun * megasas_deplete_reply_queue - Processes all completed commands
3957*4882a593Smuzhiyun * @instance: Adapter soft state
3958*4882a593Smuzhiyun * @alt_status: Alternate status to be returned to
3959*4882a593Smuzhiyun * SCSI mid-layer instead of the status
3960*4882a593Smuzhiyun * returned by the FW
3961*4882a593Smuzhiyun * Note: this must be called with hba lock held
3962*4882a593Smuzhiyun */
3963*4882a593Smuzhiyun static int
3964*4882a593Smuzhiyun megasas_deplete_reply_queue(struct megasas_instance *instance,
3965*4882a593Smuzhiyun u8 alt_status)
3966*4882a593Smuzhiyun {
3967*4882a593Smuzhiyun u32 mfiStatus;
3968*4882a593Smuzhiyun u32 fw_state;
3969*4882a593Smuzhiyun
3970*4882a593Smuzhiyun if ((mfiStatus = instance->instancet->check_reset(instance,
3971*4882a593Smuzhiyun instance->reg_set)) == 1) {
3972*4882a593Smuzhiyun return IRQ_HANDLED;
3973*4882a593Smuzhiyun }
3974*4882a593Smuzhiyun
3975*4882a593Smuzhiyun mfiStatus = instance->instancet->clear_intr(instance);
3976*4882a593Smuzhiyun if (mfiStatus == 0) {
3977*4882a593Smuzhiyun /* Hardware may not set outbound_intr_status in MSI-X mode */
3978*4882a593Smuzhiyun if (!instance->msix_vectors)
3979*4882a593Smuzhiyun return IRQ_NONE;
3980*4882a593Smuzhiyun }
3981*4882a593Smuzhiyun
3982*4882a593Smuzhiyun instance->mfiStatus = mfiStatus;
3983*4882a593Smuzhiyun
3984*4882a593Smuzhiyun if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3985*4882a593Smuzhiyun fw_state = instance->instancet->read_fw_status_reg(
3986*4882a593Smuzhiyun instance) & MFI_STATE_MASK;
3987*4882a593Smuzhiyun
3988*4882a593Smuzhiyun if (fw_state != MFI_STATE_FAULT) {
3989*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "fw state:%x\n",
3990*4882a593Smuzhiyun fw_state);
3991*4882a593Smuzhiyun }
3992*4882a593Smuzhiyun
3993*4882a593Smuzhiyun if ((fw_state == MFI_STATE_FAULT) &&
3994*4882a593Smuzhiyun (instance->disableOnlineCtrlReset == 0)) {
3995*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "wait adp restart\n");
3996*4882a593Smuzhiyun
3997*4882a593Smuzhiyun if ((instance->pdev->device ==
3998*4882a593Smuzhiyun PCI_DEVICE_ID_LSI_SAS1064R) ||
3999*4882a593Smuzhiyun (instance->pdev->device ==
4000*4882a593Smuzhiyun PCI_DEVICE_ID_DELL_PERC5) ||
4001*4882a593Smuzhiyun (instance->pdev->device ==
4002*4882a593Smuzhiyun PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
4003*4882a593Smuzhiyun
4004*4882a593Smuzhiyun *instance->consumer =
4005*4882a593Smuzhiyun cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
4006*4882a593Smuzhiyun }
4007*4882a593Smuzhiyun
4008*4882a593Smuzhiyun
4009*4882a593Smuzhiyun instance->instancet->disable_intr(instance);
4010*4882a593Smuzhiyun atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
4011*4882a593Smuzhiyun instance->issuepend_done = 0;
4012*4882a593Smuzhiyun
4013*4882a593Smuzhiyun atomic_set(&instance->fw_outstanding, 0);
4014*4882a593Smuzhiyun megasas_internal_reset_defer_cmds(instance);
4015*4882a593Smuzhiyun
4016*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
4017*4882a593Smuzhiyun fw_state, atomic_read(&instance->adprecovery));
4018*4882a593Smuzhiyun
4019*4882a593Smuzhiyun schedule_work(&instance->work_init);
4020*4882a593Smuzhiyun return IRQ_HANDLED;
4021*4882a593Smuzhiyun
4022*4882a593Smuzhiyun } else {
4023*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
4024*4882a593Smuzhiyun fw_state, instance->disableOnlineCtrlReset);
4025*4882a593Smuzhiyun }
4026*4882a593Smuzhiyun }
4027*4882a593Smuzhiyun
4028*4882a593Smuzhiyun tasklet_schedule(&instance->isr_tasklet);
4029*4882a593Smuzhiyun return IRQ_HANDLED;
4030*4882a593Smuzhiyun }
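
/*
 * Putting the pieces above together, the MFI-series online controller reset
 * path in this file runs roughly as follows:
 *   1. megasas_deplete_reply_queue() sees MFI_STATE_FAULT, disables
 *      interrupts, parks outstanding commands through
 *      megasas_internal_reset_defer_cmds() and schedules work_init.
 *   2. process_fw_state_change_wq() resets the adapter, waits for it to
 *      become ready and re-issues the INIT frame via megasas_issue_init_mfi().
 *   3. megasas_issue_pending_cmds_again() re-fires the deferred commands and
 *      re-registers the AEN.
 */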
4031*4882a593Smuzhiyun
4032*4882a593Smuzhiyun /**
4033*4882a593Smuzhiyun * megasas_isr - isr entry point
4034*4882a593Smuzhiyun * @irq: IRQ number
4035*4882a593Smuzhiyun * @devp: IRQ context address
4036*4882a593Smuzhiyun */
4037*4882a593Smuzhiyun static irqreturn_t megasas_isr(int irq, void *devp)
4038*4882a593Smuzhiyun {
4039*4882a593Smuzhiyun struct megasas_irq_context *irq_context = devp;
4040*4882a593Smuzhiyun struct megasas_instance *instance = irq_context->instance;
4041*4882a593Smuzhiyun unsigned long flags;
4042*4882a593Smuzhiyun irqreturn_t rc;
4043*4882a593Smuzhiyun
4044*4882a593Smuzhiyun if (atomic_read(&instance->fw_reset_no_pci_access))
4045*4882a593Smuzhiyun return IRQ_HANDLED;
4046*4882a593Smuzhiyun
4047*4882a593Smuzhiyun spin_lock_irqsave(&instance->hba_lock, flags);
4048*4882a593Smuzhiyun rc = megasas_deplete_reply_queue(instance, DID_OK);
4049*4882a593Smuzhiyun spin_unlock_irqrestore(&instance->hba_lock, flags);
4050*4882a593Smuzhiyun
4051*4882a593Smuzhiyun return rc;
4052*4882a593Smuzhiyun }
4053*4882a593Smuzhiyun
4054*4882a593Smuzhiyun /**
4055*4882a593Smuzhiyun * megasas_transition_to_ready - Move the FW to READY state
4056*4882a593Smuzhiyun * @instance: Adapter soft state
4057*4882a593Smuzhiyun * @ocr: Adapter reset state
4058*4882a593Smuzhiyun *
4059*4882a593Smuzhiyun * During initialization, the FW can be in any one of several possible
4060*4882a593Smuzhiyun * states. If the FW is in the operational or waiting-for-handshake states,
4061*4882a593Smuzhiyun * the driver must take steps to bring it to the ready state. Otherwise, it
4062*4882a593Smuzhiyun * has to wait for the ready state.
4063*4882a593Smuzhiyun */
4064*4882a593Smuzhiyun int
4065*4882a593Smuzhiyun megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
4066*4882a593Smuzhiyun {
4067*4882a593Smuzhiyun int i;
4068*4882a593Smuzhiyun u8 max_wait;
4069*4882a593Smuzhiyun u32 fw_state;
4070*4882a593Smuzhiyun u32 abs_state, curr_abs_state;
4071*4882a593Smuzhiyun
4072*4882a593Smuzhiyun abs_state = instance->instancet->read_fw_status_reg(instance);
4073*4882a593Smuzhiyun fw_state = abs_state & MFI_STATE_MASK;
4074*4882a593Smuzhiyun
4075*4882a593Smuzhiyun if (fw_state != MFI_STATE_READY)
4076*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
4077*4882a593Smuzhiyun " state\n");
4078*4882a593Smuzhiyun
4079*4882a593Smuzhiyun while (fw_state != MFI_STATE_READY) {
4080*4882a593Smuzhiyun
4081*4882a593Smuzhiyun switch (fw_state) {
4082*4882a593Smuzhiyun
4083*4882a593Smuzhiyun case MFI_STATE_FAULT:
4084*4882a593Smuzhiyun dev_printk(KERN_ERR, &instance->pdev->dev,
4085*4882a593Smuzhiyun "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n",
4086*4882a593Smuzhiyun abs_state & MFI_STATE_FAULT_CODE,
4087*4882a593Smuzhiyun abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
4088*4882a593Smuzhiyun if (ocr) {
4089*4882a593Smuzhiyun max_wait = MEGASAS_RESET_WAIT_TIME;
4090*4882a593Smuzhiyun break;
4091*4882a593Smuzhiyun } else {
4092*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4093*4882a593Smuzhiyun megasas_dump_reg_set(instance->reg_set);
4094*4882a593Smuzhiyun return -ENODEV;
4095*4882a593Smuzhiyun }
4096*4882a593Smuzhiyun
4097*4882a593Smuzhiyun case MFI_STATE_WAIT_HANDSHAKE:
4098*4882a593Smuzhiyun /*
4099*4882a593Smuzhiyun * Set the CLR bit in inbound doorbell
4100*4882a593Smuzhiyun */
4101*4882a593Smuzhiyun if ((instance->pdev->device ==
4102*4882a593Smuzhiyun PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4103*4882a593Smuzhiyun (instance->pdev->device ==
4104*4882a593Smuzhiyun PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4105*4882a593Smuzhiyun (instance->adapter_type != MFI_SERIES))
4106*4882a593Smuzhiyun writel(
4107*4882a593Smuzhiyun MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4108*4882a593Smuzhiyun &instance->reg_set->doorbell);
4109*4882a593Smuzhiyun else
4110*4882a593Smuzhiyun writel(
4111*4882a593Smuzhiyun MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4112*4882a593Smuzhiyun &instance->reg_set->inbound_doorbell);
4113*4882a593Smuzhiyun
4114*4882a593Smuzhiyun max_wait = MEGASAS_RESET_WAIT_TIME;
4115*4882a593Smuzhiyun break;
4116*4882a593Smuzhiyun
4117*4882a593Smuzhiyun case MFI_STATE_BOOT_MESSAGE_PENDING:
4118*4882a593Smuzhiyun if ((instance->pdev->device ==
4119*4882a593Smuzhiyun PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4120*4882a593Smuzhiyun (instance->pdev->device ==
4121*4882a593Smuzhiyun PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4122*4882a593Smuzhiyun (instance->adapter_type != MFI_SERIES))
4123*4882a593Smuzhiyun writel(MFI_INIT_HOTPLUG,
4124*4882a593Smuzhiyun &instance->reg_set->doorbell);
4125*4882a593Smuzhiyun else
4126*4882a593Smuzhiyun writel(MFI_INIT_HOTPLUG,
4127*4882a593Smuzhiyun &instance->reg_set->inbound_doorbell);
4128*4882a593Smuzhiyun
4129*4882a593Smuzhiyun max_wait = MEGASAS_RESET_WAIT_TIME;
4130*4882a593Smuzhiyun break;
4131*4882a593Smuzhiyun
4132*4882a593Smuzhiyun case MFI_STATE_OPERATIONAL:
4133*4882a593Smuzhiyun /*
4134*4882a593Smuzhiyun * Bring it to READY state; assuming max wait 10 secs
4135*4882a593Smuzhiyun */
4136*4882a593Smuzhiyun instance->instancet->disable_intr(instance);
4137*4882a593Smuzhiyun if ((instance->pdev->device ==
4138*4882a593Smuzhiyun PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4139*4882a593Smuzhiyun (instance->pdev->device ==
4140*4882a593Smuzhiyun PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4141*4882a593Smuzhiyun (instance->adapter_type != MFI_SERIES)) {
4142*4882a593Smuzhiyun writel(MFI_RESET_FLAGS,
4143*4882a593Smuzhiyun &instance->reg_set->doorbell);
4144*4882a593Smuzhiyun
4145*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES) {
4146*4882a593Smuzhiyun for (i = 0; i < (10 * 1000); i += 20) {
4147*4882a593Smuzhiyun if (megasas_readl(
4148*4882a593Smuzhiyun instance,
4149*4882a593Smuzhiyun &instance->
4150*4882a593Smuzhiyun reg_set->
4151*4882a593Smuzhiyun doorbell) & 1)
4152*4882a593Smuzhiyun msleep(20);
4153*4882a593Smuzhiyun else
4154*4882a593Smuzhiyun break;
4155*4882a593Smuzhiyun }
4156*4882a593Smuzhiyun }
4157*4882a593Smuzhiyun } else
4158*4882a593Smuzhiyun writel(MFI_RESET_FLAGS,
4159*4882a593Smuzhiyun &instance->reg_set->inbound_doorbell);
4160*4882a593Smuzhiyun
4161*4882a593Smuzhiyun max_wait = MEGASAS_RESET_WAIT_TIME;
4162*4882a593Smuzhiyun break;
4163*4882a593Smuzhiyun
4164*4882a593Smuzhiyun case MFI_STATE_UNDEFINED:
4165*4882a593Smuzhiyun /*
4166*4882a593Smuzhiyun * This state should not last for more than 2 seconds
4167*4882a593Smuzhiyun */
4168*4882a593Smuzhiyun max_wait = MEGASAS_RESET_WAIT_TIME;
4169*4882a593Smuzhiyun break;
4170*4882a593Smuzhiyun
4171*4882a593Smuzhiyun case MFI_STATE_BB_INIT:
4172*4882a593Smuzhiyun max_wait = MEGASAS_RESET_WAIT_TIME;
4173*4882a593Smuzhiyun break;
4174*4882a593Smuzhiyun
4175*4882a593Smuzhiyun case MFI_STATE_FW_INIT:
4176*4882a593Smuzhiyun max_wait = MEGASAS_RESET_WAIT_TIME;
4177*4882a593Smuzhiyun break;
4178*4882a593Smuzhiyun
4179*4882a593Smuzhiyun case MFI_STATE_FW_INIT_2:
4180*4882a593Smuzhiyun max_wait = MEGASAS_RESET_WAIT_TIME;
4181*4882a593Smuzhiyun break;
4182*4882a593Smuzhiyun
4183*4882a593Smuzhiyun case MFI_STATE_DEVICE_SCAN:
4184*4882a593Smuzhiyun max_wait = MEGASAS_RESET_WAIT_TIME;
4185*4882a593Smuzhiyun break;
4186*4882a593Smuzhiyun
4187*4882a593Smuzhiyun case MFI_STATE_FLUSH_CACHE:
4188*4882a593Smuzhiyun max_wait = MEGASAS_RESET_WAIT_TIME;
4189*4882a593Smuzhiyun break;
4190*4882a593Smuzhiyun
4191*4882a593Smuzhiyun default:
4192*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
4193*4882a593Smuzhiyun fw_state);
4194*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4195*4882a593Smuzhiyun megasas_dump_reg_set(instance->reg_set);
4196*4882a593Smuzhiyun return -ENODEV;
4197*4882a593Smuzhiyun }
4198*4882a593Smuzhiyun
4199*4882a593Smuzhiyun /*
4200*4882a593Smuzhiyun * The cur_state should not last for more than max_wait secs
4201*4882a593Smuzhiyun */
4202*4882a593Smuzhiyun for (i = 0; i < max_wait * 50; i++) {
4203*4882a593Smuzhiyun curr_abs_state = instance->instancet->
4204*4882a593Smuzhiyun read_fw_status_reg(instance);
4205*4882a593Smuzhiyun
4206*4882a593Smuzhiyun if (abs_state == curr_abs_state) {
4207*4882a593Smuzhiyun msleep(20);
4208*4882a593Smuzhiyun } else
4209*4882a593Smuzhiyun break;
4210*4882a593Smuzhiyun }
4211*4882a593Smuzhiyun
4212*4882a593Smuzhiyun /*
4213*4882a593Smuzhiyun * Return error if fw_state hasn't changed after max_wait
4214*4882a593Smuzhiyun */
4215*4882a593Smuzhiyun if (curr_abs_state == abs_state) {
4216*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
4217*4882a593Smuzhiyun "in %d secs\n", fw_state, max_wait);
4218*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4219*4882a593Smuzhiyun megasas_dump_reg_set(instance->reg_set);
4220*4882a593Smuzhiyun return -ENODEV;
4221*4882a593Smuzhiyun }
4222*4882a593Smuzhiyun
4223*4882a593Smuzhiyun abs_state = curr_abs_state;
4224*4882a593Smuzhiyun fw_state = curr_abs_state & MFI_STATE_MASK;
4225*4882a593Smuzhiyun }
4226*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "FW now in Ready state\n");
4227*4882a593Smuzhiyun
4228*4882a593Smuzhiyun return 0;
4229*4882a593Smuzhiyun }
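
/*
 * Example of the @ocr distinction above: the recovery worker
 * process_fw_state_change_wq() calls megasas_transition_to_ready(instance, 1),
 * so a FAULT state is polled for up to MEGASAS_RESET_WAIT_TIME instead of
 * failing the adapter immediately, which is what the ocr == 0 path does.
 */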
4230*4882a593Smuzhiyun
4231*4882a593Smuzhiyun /**
4232*4882a593Smuzhiyun * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
4233*4882a593Smuzhiyun * @instance: Adapter soft state
4234*4882a593Smuzhiyun */
4235*4882a593Smuzhiyun static void megasas_teardown_frame_pool(struct megasas_instance *instance)
4236*4882a593Smuzhiyun {
4237*4882a593Smuzhiyun int i;
4238*4882a593Smuzhiyun u16 max_cmd = instance->max_mfi_cmds;
4239*4882a593Smuzhiyun struct megasas_cmd *cmd;
4240*4882a593Smuzhiyun
4241*4882a593Smuzhiyun if (!instance->frame_dma_pool)
4242*4882a593Smuzhiyun return;
4243*4882a593Smuzhiyun
4244*4882a593Smuzhiyun /*
4245*4882a593Smuzhiyun * Return all frames to pool
4246*4882a593Smuzhiyun */
4247*4882a593Smuzhiyun for (i = 0; i < max_cmd; i++) {
4248*4882a593Smuzhiyun
4249*4882a593Smuzhiyun cmd = instance->cmd_list[i];
4250*4882a593Smuzhiyun
4251*4882a593Smuzhiyun if (cmd->frame)
4252*4882a593Smuzhiyun dma_pool_free(instance->frame_dma_pool, cmd->frame,
4253*4882a593Smuzhiyun cmd->frame_phys_addr);
4254*4882a593Smuzhiyun
4255*4882a593Smuzhiyun if (cmd->sense)
4256*4882a593Smuzhiyun dma_pool_free(instance->sense_dma_pool, cmd->sense,
4257*4882a593Smuzhiyun cmd->sense_phys_addr);
4258*4882a593Smuzhiyun }
4259*4882a593Smuzhiyun
4260*4882a593Smuzhiyun /*
4261*4882a593Smuzhiyun * Now destroy the pool itself
4262*4882a593Smuzhiyun */
4263*4882a593Smuzhiyun dma_pool_destroy(instance->frame_dma_pool);
4264*4882a593Smuzhiyun dma_pool_destroy(instance->sense_dma_pool);
4265*4882a593Smuzhiyun
4266*4882a593Smuzhiyun instance->frame_dma_pool = NULL;
4267*4882a593Smuzhiyun instance->sense_dma_pool = NULL;
4268*4882a593Smuzhiyun }
4269*4882a593Smuzhiyun
4270*4882a593Smuzhiyun /**
4271*4882a593Smuzhiyun * megasas_create_frame_pool - Creates DMA pool for cmd frames
4272*4882a593Smuzhiyun * @instance: Adapter soft state
4273*4882a593Smuzhiyun *
4274*4882a593Smuzhiyun * Each command packet has an embedded DMA memory buffer that is used for
4275*4882a593Smuzhiyun * filling the MFI frame and the SG list that immediately follows it. This
4276*4882a593Smuzhiyun * function creates those DMA memory buffers for each command packet using
4277*4882a593Smuzhiyun * the PCI DMA pool facility.
4278*4882a593Smuzhiyun */
4279*4882a593Smuzhiyun static int megasas_create_frame_pool(struct megasas_instance *instance)
4280*4882a593Smuzhiyun {
4281*4882a593Smuzhiyun int i;
4282*4882a593Smuzhiyun u16 max_cmd;
4283*4882a593Smuzhiyun u32 frame_count;
4284*4882a593Smuzhiyun struct megasas_cmd *cmd;
4285*4882a593Smuzhiyun
4286*4882a593Smuzhiyun max_cmd = instance->max_mfi_cmds;
4287*4882a593Smuzhiyun
4288*4882a593Smuzhiyun /*
4289*4882a593Smuzhiyun * For MFI controllers:
4290*4882a593Smuzhiyun * max_num_sge = 60
4291*4882a593Smuzhiyun * max_sge_sz = 16 bytes (sizeof megasas_sge_skinny)
4292*4882a593Smuzhiyun * Total 960 bytes (15 MFI frames of 64 bytes)
4293*4882a593Smuzhiyun *
4294*4882a593Smuzhiyun * Fusion adapters require only 3 extra frames:
4295*4882a593Smuzhiyun * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
4296*4882a593Smuzhiyun * max_sge_sz = 12 bytes (sizeof megasas_sge64)
4297*4882a593Smuzhiyun * Total 192 bytes (3 MFI frames of 64 bytes)
4298*4882a593Smuzhiyun */
4299*4882a593Smuzhiyun frame_count = (instance->adapter_type == MFI_SERIES) ?
4300*4882a593Smuzhiyun (15 + 1) : (3 + 1);
4301*4882a593Smuzhiyun instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
4302*4882a593Smuzhiyun /*
4303*4882a593Smuzhiyun * Use DMA pool facility provided by PCI layer
4304*4882a593Smuzhiyun */
4305*4882a593Smuzhiyun instance->frame_dma_pool = dma_pool_create("megasas frame pool",
4306*4882a593Smuzhiyun &instance->pdev->dev,
4307*4882a593Smuzhiyun instance->mfi_frame_size, 256, 0);
4308*4882a593Smuzhiyun
4309*4882a593Smuzhiyun if (!instance->frame_dma_pool) {
4310*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
4311*4882a593Smuzhiyun return -ENOMEM;
4312*4882a593Smuzhiyun }
4313*4882a593Smuzhiyun
4314*4882a593Smuzhiyun instance->sense_dma_pool = dma_pool_create("megasas sense pool",
4315*4882a593Smuzhiyun &instance->pdev->dev, 128,
4316*4882a593Smuzhiyun 4, 0);
4317*4882a593Smuzhiyun
4318*4882a593Smuzhiyun if (!instance->sense_dma_pool) {
4319*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
4320*4882a593Smuzhiyun
4321*4882a593Smuzhiyun dma_pool_destroy(instance->frame_dma_pool);
4322*4882a593Smuzhiyun instance->frame_dma_pool = NULL;
4323*4882a593Smuzhiyun
4324*4882a593Smuzhiyun return -ENOMEM;
4325*4882a593Smuzhiyun }
4326*4882a593Smuzhiyun
4327*4882a593Smuzhiyun /*
4328*4882a593Smuzhiyun * Allocate and attach a frame to each of the commands in cmd_list.
4329*4882a593Smuzhiyun * By making cmd->index as the context instead of the &cmd, we can
4330*4882a593Smuzhiyun * always use 32bit context regardless of the architecture
4331*4882a593Smuzhiyun */
4332*4882a593Smuzhiyun for (i = 0; i < max_cmd; i++) {
4333*4882a593Smuzhiyun
4334*4882a593Smuzhiyun cmd = instance->cmd_list[i];
4335*4882a593Smuzhiyun
4336*4882a593Smuzhiyun cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
4337*4882a593Smuzhiyun GFP_KERNEL, &cmd->frame_phys_addr);
4338*4882a593Smuzhiyun
4339*4882a593Smuzhiyun cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
4340*4882a593Smuzhiyun GFP_KERNEL, &cmd->sense_phys_addr);
4341*4882a593Smuzhiyun
4342*4882a593Smuzhiyun /*
4343*4882a593Smuzhiyun * megasas_teardown_frame_pool() takes care of freeing
4344*4882a593Smuzhiyun * whatever has been allocated
4345*4882a593Smuzhiyun */
4346*4882a593Smuzhiyun if (!cmd->frame || !cmd->sense) {
4347*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
4348*4882a593Smuzhiyun megasas_teardown_frame_pool(instance);
4349*4882a593Smuzhiyun return -ENOMEM;
4350*4882a593Smuzhiyun }
4351*4882a593Smuzhiyun
4352*4882a593Smuzhiyun cmd->frame->io.context = cpu_to_le32(cmd->index);
4353*4882a593Smuzhiyun cmd->frame->io.pad_0 = 0;
4354*4882a593Smuzhiyun if ((instance->adapter_type == MFI_SERIES) && reset_devices)
4355*4882a593Smuzhiyun cmd->frame->hdr.cmd = MFI_CMD_INVALID;
4356*4882a593Smuzhiyun }
4357*4882a593Smuzhiyun
4358*4882a593Smuzhiyun return 0;
4359*4882a593Smuzhiyun }
4360*4882a593Smuzhiyun
4361*4882a593Smuzhiyun /**
4362*4882a593Smuzhiyun * megasas_free_cmds - Free all the cmds in the free cmd pool
4363*4882a593Smuzhiyun * @instance: Adapter soft state
4364*4882a593Smuzhiyun */
4365*4882a593Smuzhiyun void megasas_free_cmds(struct megasas_instance *instance)
4366*4882a593Smuzhiyun {
4367*4882a593Smuzhiyun int i;
4368*4882a593Smuzhiyun
4369*4882a593Smuzhiyun /* First free the MFI frame pool */
4370*4882a593Smuzhiyun megasas_teardown_frame_pool(instance);
4371*4882a593Smuzhiyun
4372*4882a593Smuzhiyun /* Free all the commands in the cmd_list */
4373*4882a593Smuzhiyun for (i = 0; i < instance->max_mfi_cmds; i++)
4374*4882a593Smuzhiyun
4375*4882a593Smuzhiyun kfree(instance->cmd_list[i]);
4376*4882a593Smuzhiyun
4377*4882a593Smuzhiyun /* Free the cmd_list buffer itself */
4378*4882a593Smuzhiyun kfree(instance->cmd_list);
4379*4882a593Smuzhiyun instance->cmd_list = NULL;
4380*4882a593Smuzhiyun
4381*4882a593Smuzhiyun INIT_LIST_HEAD(&instance->cmd_pool);
4382*4882a593Smuzhiyun }
4383*4882a593Smuzhiyun
4384*4882a593Smuzhiyun /**
4385*4882a593Smuzhiyun * megasas_alloc_cmds - Allocates the command packets
4386*4882a593Smuzhiyun * @instance: Adapter soft state
4387*4882a593Smuzhiyun *
4388*4882a593Smuzhiyun * Each command that is issued to the FW, whether IO commands from the OS or
4389*4882a593Smuzhiyun * internal commands like IOCTLs, are wrapped in local data structure called
4390*4882a593Smuzhiyun * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
4391*4882a593Smuzhiyun * the FW.
4392*4882a593Smuzhiyun *
4393*4882a593Smuzhiyun * Each frame has a 32-bit field called context (tag). This context is used
4394*4882a593Smuzhiyun * to get back the megasas_cmd from the frame when a frame gets completed in
4395*4882a593Smuzhiyun * the ISR. Typically the address of the megasas_cmd itself would be used as
4396*4882a593Smuzhiyun * the context. But we wanted to keep the differences between 32 and 64 bit
4397*4882a593Smuzhiyun * systems to the minimum. We always use 32 bit integers for the context. In
4398*4882a593Smuzhiyun * this driver, the 32 bit values are the indices into an array cmd_list.
4399*4882a593Smuzhiyun * This array is used only to look up the megasas_cmd given the context. The
4400*4882a593Smuzhiyun * free commands themselves are maintained in a linked list called cmd_pool.
4401*4882a593Smuzhiyun */
4402*4882a593Smuzhiyun int megasas_alloc_cmds(struct megasas_instance *instance)
4403*4882a593Smuzhiyun {
4404*4882a593Smuzhiyun int i;
4405*4882a593Smuzhiyun int j;
4406*4882a593Smuzhiyun u16 max_cmd;
4407*4882a593Smuzhiyun struct megasas_cmd *cmd;
4408*4882a593Smuzhiyun
4409*4882a593Smuzhiyun max_cmd = instance->max_mfi_cmds;
4410*4882a593Smuzhiyun
4411*4882a593Smuzhiyun /*
4412*4882a593Smuzhiyun * instance->cmd_list is an array of struct megasas_cmd pointers.
4413*4882a593Smuzhiyun * Allocate the dynamic array first and then allocate individual
4414*4882a593Smuzhiyun * commands.
4415*4882a593Smuzhiyun */
4416*4882a593Smuzhiyun instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
4417*4882a593Smuzhiyun
4418*4882a593Smuzhiyun if (!instance->cmd_list) {
4419*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
4420*4882a593Smuzhiyun return -ENOMEM;
4421*4882a593Smuzhiyun }
4422*4882a593Smuzhiyun
4423*4882a593Smuzhiyun memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
4424*4882a593Smuzhiyun
4425*4882a593Smuzhiyun for (i = 0; i < max_cmd; i++) {
4426*4882a593Smuzhiyun instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4427*4882a593Smuzhiyun GFP_KERNEL);
4428*4882a593Smuzhiyun
4429*4882a593Smuzhiyun if (!instance->cmd_list[i]) {
4430*4882a593Smuzhiyun
4431*4882a593Smuzhiyun for (j = 0; j < i; j++)
4432*4882a593Smuzhiyun kfree(instance->cmd_list[j]);
4433*4882a593Smuzhiyun
4434*4882a593Smuzhiyun kfree(instance->cmd_list);
4435*4882a593Smuzhiyun instance->cmd_list = NULL;
4436*4882a593Smuzhiyun
4437*4882a593Smuzhiyun return -ENOMEM;
4438*4882a593Smuzhiyun }
4439*4882a593Smuzhiyun }
4440*4882a593Smuzhiyun
4441*4882a593Smuzhiyun for (i = 0; i < max_cmd; i++) {
4442*4882a593Smuzhiyun cmd = instance->cmd_list[i];
4443*4882a593Smuzhiyun memset(cmd, 0, sizeof(struct megasas_cmd));
4444*4882a593Smuzhiyun cmd->index = i;
4445*4882a593Smuzhiyun cmd->scmd = NULL;
4446*4882a593Smuzhiyun cmd->instance = instance;
4447*4882a593Smuzhiyun
4448*4882a593Smuzhiyun list_add_tail(&cmd->list, &instance->cmd_pool);
4449*4882a593Smuzhiyun }
4450*4882a593Smuzhiyun
4451*4882a593Smuzhiyun /*
4452*4882a593Smuzhiyun * Create a frame pool and assign one frame to each cmd
4453*4882a593Smuzhiyun */
4454*4882a593Smuzhiyun if (megasas_create_frame_pool(instance)) {
4455*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4456*4882a593Smuzhiyun megasas_free_cmds(instance);
4457*4882a593Smuzhiyun return -ENOMEM;
4458*4882a593Smuzhiyun }
4459*4882a593Smuzhiyun
4460*4882a593Smuzhiyun return 0;
4461*4882a593Smuzhiyun }
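
/*
 * Illustrative sketch (an assumption, not code from this file): with the
 * context/tag scheme described above, a completion path can map a frame back
 * to its command with a simple array lookup, e.g.:
 *
 *	u32 context = le32_to_cpu(frame->io.context);
 *	struct megasas_cmd *cmd = instance->cmd_list[context];
 */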
4462*4882a593Smuzhiyun
4463*4882a593Smuzhiyun /*
4464*4882a593Smuzhiyun * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state.
4465*4882a593Smuzhiyun * @instance: Adapter soft state
4466*4882a593Smuzhiyun *
4467*4882a593Smuzhiyun * Returns KILL_ADAPTER for MFI-series adapters, IGNORE_TIMEOUT if driver
4468*4882a593Smuzhiyun * load/unload or an OCR is already in progress, and INITIATE_OCR otherwise.
4469*4882a593Smuzhiyun */
4470*4882a593Smuzhiyun inline int
4471*4882a593Smuzhiyun dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
4472*4882a593Smuzhiyun
4473*4882a593Smuzhiyun if (instance->adapter_type == MFI_SERIES)
4474*4882a593Smuzhiyun return KILL_ADAPTER;
4475*4882a593Smuzhiyun else if (instance->unload ||
4476*4882a593Smuzhiyun test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE,
4477*4882a593Smuzhiyun &instance->reset_flags))
4478*4882a593Smuzhiyun return IGNORE_TIMEOUT;
4479*4882a593Smuzhiyun else
4480*4882a593Smuzhiyun return INITIATE_OCR;
4481*4882a593Smuzhiyun }
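/*
 * Illustrative sketch (not part of the driver build): the caller-side
 * pattern repeated for every DCMD_TIMEOUT below, with stub names standing
 * in for the real reset/kill helpers. The real callers also drop and
 * retake reset_mutex around the reset, since the AEN path holds it.
 */
#if 0
#include <stdio.h>

enum { DEMO_INITIATE_OCR, DEMO_KILL_ADAPTER, DEMO_IGNORE_TIMEOUT };

static void demo_handle_dcmd_timeout(int decision)
{
	switch (decision) {
	case DEMO_INITIATE_OCR:
		printf("mark frame DRV_DCMD_SKIP_REFIRE, then reset the controller\n");
		break;
	case DEMO_KILL_ADAPTER:
		printf("adapter cannot recover: take the HBA offline\n");
		break;
	case DEMO_IGNORE_TIMEOUT:
		printf("driver is unloading or OCR not possible: ignore\n");
		break;
	}
}
#endif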
4482*4882a593Smuzhiyun
4483*4882a593Smuzhiyun static void
4484*4882a593Smuzhiyun megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4485*4882a593Smuzhiyun {
4486*4882a593Smuzhiyun int ret;
4487*4882a593Smuzhiyun struct megasas_cmd *cmd;
4488*4882a593Smuzhiyun struct megasas_dcmd_frame *dcmd;
4489*4882a593Smuzhiyun
4490*4882a593Smuzhiyun struct MR_PRIV_DEVICE *mr_device_priv_data;
4491*4882a593Smuzhiyun u16 device_id = 0;
4492*4882a593Smuzhiyun
4493*4882a593Smuzhiyun device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4494*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
4495*4882a593Smuzhiyun
4496*4882a593Smuzhiyun if (!cmd) {
4497*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4498*4882a593Smuzhiyun return;
4499*4882a593Smuzhiyun }
4500*4882a593Smuzhiyun
4501*4882a593Smuzhiyun dcmd = &cmd->frame->dcmd;
4502*4882a593Smuzhiyun
4503*4882a593Smuzhiyun memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4504*4882a593Smuzhiyun memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4505*4882a593Smuzhiyun
4506*4882a593Smuzhiyun dcmd->mbox.s[0] = cpu_to_le16(device_id);
4507*4882a593Smuzhiyun dcmd->cmd = MFI_CMD_DCMD;
4508*4882a593Smuzhiyun dcmd->cmd_status = 0xFF;
4509*4882a593Smuzhiyun dcmd->sge_count = 1;
4510*4882a593Smuzhiyun dcmd->flags = MFI_FRAME_DIR_READ;
4511*4882a593Smuzhiyun dcmd->timeout = 0;
4512*4882a593Smuzhiyun dcmd->pad_0 = 0;
4513*4882a593Smuzhiyun dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4514*4882a593Smuzhiyun dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4515*4882a593Smuzhiyun
4516*4882a593Smuzhiyun megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4517*4882a593Smuzhiyun sizeof(struct MR_PD_INFO));
4518*4882a593Smuzhiyun
4519*4882a593Smuzhiyun if ((instance->adapter_type != MFI_SERIES) &&
4520*4882a593Smuzhiyun !instance->mask_interrupts)
4521*4882a593Smuzhiyun ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4522*4882a593Smuzhiyun else
4523*4882a593Smuzhiyun ret = megasas_issue_polled(instance, cmd);
4524*4882a593Smuzhiyun
4525*4882a593Smuzhiyun switch (ret) {
4526*4882a593Smuzhiyun case DCMD_SUCCESS:
4527*4882a593Smuzhiyun mr_device_priv_data = sdev->hostdata;
4528*4882a593Smuzhiyun le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4529*4882a593Smuzhiyun mr_device_priv_data->interface_type =
4530*4882a593Smuzhiyun instance->pd_info->state.ddf.pdType.intf;
4531*4882a593Smuzhiyun break;
4532*4882a593Smuzhiyun
4533*4882a593Smuzhiyun case DCMD_TIMEOUT:
4534*4882a593Smuzhiyun
4535*4882a593Smuzhiyun switch (dcmd_timeout_ocr_possible(instance)) {
4536*4882a593Smuzhiyun case INITIATE_OCR:
4537*4882a593Smuzhiyun cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4538*4882a593Smuzhiyun mutex_unlock(&instance->reset_mutex);
4539*4882a593Smuzhiyun megasas_reset_fusion(instance->host,
4540*4882a593Smuzhiyun MFI_IO_TIMEOUT_OCR);
4541*4882a593Smuzhiyun mutex_lock(&instance->reset_mutex);
4542*4882a593Smuzhiyun break;
4543*4882a593Smuzhiyun case KILL_ADAPTER:
4544*4882a593Smuzhiyun megaraid_sas_kill_hba(instance);
4545*4882a593Smuzhiyun break;
4546*4882a593Smuzhiyun case IGNORE_TIMEOUT:
4547*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4548*4882a593Smuzhiyun __func__, __LINE__);
4549*4882a593Smuzhiyun break;
4550*4882a593Smuzhiyun }
4551*4882a593Smuzhiyun
4552*4882a593Smuzhiyun break;
4553*4882a593Smuzhiyun }
4554*4882a593Smuzhiyun
4555*4882a593Smuzhiyun if (ret != DCMD_TIMEOUT)
4556*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
4557*4882a593Smuzhiyun
4558*4882a593Smuzhiyun return;
4559*4882a593Smuzhiyun }
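/*
 * Illustrative sketch (not part of the driver build): how the flat
 * device_id above is derived from the SCSI channel/id pair. The value
 * 128 for devices per channel is an assumed stand-in used only for this
 * illustration.
 */
#if 0
#include <stdio.h>

#define DEMO_MAX_DEV_PER_CHANNEL 128	/* assumed stand-in value */

int main(void)
{
	unsigned int channel = 1, id = 5;
	unsigned int device_id = channel * DEMO_MAX_DEV_PER_CHANNEL + id;

	printf("channel %u, id %u -> device_id %u\n", channel, id, device_id);
	return 0;	/* prints: channel 1, id 5 -> device_id 133 */
}
#endif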
4560*4882a593Smuzhiyun /*
4561*4882a593Smuzhiyun * megasas_get_pd_list - Returns FW's pd_list structure
4562*4882a593Smuzhiyun * @instance: Adapter soft state
4563*4882a593Smuzhiyun *
4564*4882a593Smuzhiyun * Issues an internal command (DCMD) to get the FW's controller PD
4565*4882a593Smuzhiyun * list structure. This information is mainly used to find out the
4566*4882a593Smuzhiyun * system PDs (physical drives exposed to the host) supported by the
4567*4882a593Smuzhiyun * FW.
4568*4882a593Smuzhiyun */
4569*4882a593Smuzhiyun static int
4570*4882a593Smuzhiyun megasas_get_pd_list(struct megasas_instance *instance)
4571*4882a593Smuzhiyun {
4572*4882a593Smuzhiyun int ret = 0, pd_index = 0;
4573*4882a593Smuzhiyun struct megasas_cmd *cmd;
4574*4882a593Smuzhiyun struct megasas_dcmd_frame *dcmd;
4575*4882a593Smuzhiyun struct MR_PD_LIST *ci;
4576*4882a593Smuzhiyun struct MR_PD_ADDRESS *pd_addr;
4577*4882a593Smuzhiyun
4578*4882a593Smuzhiyun if (instance->pd_list_not_supported) {
4579*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4580*4882a593Smuzhiyun "not supported by firmware\n");
4581*4882a593Smuzhiyun return ret;
4582*4882a593Smuzhiyun }
4583*4882a593Smuzhiyun
4584*4882a593Smuzhiyun ci = instance->pd_list_buf;
4585*4882a593Smuzhiyun
4586*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
4587*4882a593Smuzhiyun
4588*4882a593Smuzhiyun if (!cmd) {
4589*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4590*4882a593Smuzhiyun return -ENOMEM;
4591*4882a593Smuzhiyun }
4592*4882a593Smuzhiyun
4593*4882a593Smuzhiyun dcmd = &cmd->frame->dcmd;
4594*4882a593Smuzhiyun
4595*4882a593Smuzhiyun memset(ci, 0, sizeof(*ci));
4596*4882a593Smuzhiyun memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4597*4882a593Smuzhiyun
4598*4882a593Smuzhiyun dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4599*4882a593Smuzhiyun dcmd->mbox.b[1] = 0;
4600*4882a593Smuzhiyun dcmd->cmd = MFI_CMD_DCMD;
4601*4882a593Smuzhiyun dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4602*4882a593Smuzhiyun dcmd->sge_count = 1;
4603*4882a593Smuzhiyun dcmd->flags = MFI_FRAME_DIR_READ;
4604*4882a593Smuzhiyun dcmd->timeout = 0;
4605*4882a593Smuzhiyun dcmd->pad_0 = 0;
4606*4882a593Smuzhiyun dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4607*4882a593Smuzhiyun dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4608*4882a593Smuzhiyun
4609*4882a593Smuzhiyun megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4610*4882a593Smuzhiyun (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4611*4882a593Smuzhiyun
4612*4882a593Smuzhiyun if ((instance->adapter_type != MFI_SERIES) &&
4613*4882a593Smuzhiyun !instance->mask_interrupts)
4614*4882a593Smuzhiyun ret = megasas_issue_blocked_cmd(instance, cmd,
4615*4882a593Smuzhiyun MFI_IO_TIMEOUT_SECS);
4616*4882a593Smuzhiyun else
4617*4882a593Smuzhiyun ret = megasas_issue_polled(instance, cmd);
4618*4882a593Smuzhiyun
4619*4882a593Smuzhiyun switch (ret) {
4620*4882a593Smuzhiyun case DCMD_FAILED:
4621*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4622*4882a593Smuzhiyun "failed/not supported by firmware\n");
4623*4882a593Smuzhiyun
4624*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES)
4625*4882a593Smuzhiyun megaraid_sas_kill_hba(instance);
4626*4882a593Smuzhiyun else
4627*4882a593Smuzhiyun instance->pd_list_not_supported = 1;
4628*4882a593Smuzhiyun break;
4629*4882a593Smuzhiyun case DCMD_TIMEOUT:
4630*4882a593Smuzhiyun
4631*4882a593Smuzhiyun switch (dcmd_timeout_ocr_possible(instance)) {
4632*4882a593Smuzhiyun case INITIATE_OCR:
4633*4882a593Smuzhiyun cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4634*4882a593Smuzhiyun /*
4635*4882a593Smuzhiyun * DCMD timed out from the AEN path.
4636*4882a593Smuzhiyun * The AEN path already holds reset_mutex to avoid PCI access
4637*4882a593Smuzhiyun * while OCR is in progress.
4638*4882a593Smuzhiyun */
4639*4882a593Smuzhiyun mutex_unlock(&instance->reset_mutex);
4640*4882a593Smuzhiyun megasas_reset_fusion(instance->host,
4641*4882a593Smuzhiyun MFI_IO_TIMEOUT_OCR);
4642*4882a593Smuzhiyun mutex_lock(&instance->reset_mutex);
4643*4882a593Smuzhiyun break;
4644*4882a593Smuzhiyun case KILL_ADAPTER:
4645*4882a593Smuzhiyun megaraid_sas_kill_hba(instance);
4646*4882a593Smuzhiyun break;
4647*4882a593Smuzhiyun case IGNORE_TIMEOUT:
4648*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4649*4882a593Smuzhiyun __func__, __LINE__);
4650*4882a593Smuzhiyun break;
4651*4882a593Smuzhiyun }
4652*4882a593Smuzhiyun
4653*4882a593Smuzhiyun break;
4654*4882a593Smuzhiyun
4655*4882a593Smuzhiyun case DCMD_SUCCESS:
4656*4882a593Smuzhiyun pd_addr = ci->addr;
4657*4882a593Smuzhiyun if (megasas_dbg_lvl & LD_PD_DEBUG)
4658*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n",
4659*4882a593Smuzhiyun __func__, le32_to_cpu(ci->count));
4660*4882a593Smuzhiyun
4661*4882a593Smuzhiyun if ((le32_to_cpu(ci->count) >
4662*4882a593Smuzhiyun (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4663*4882a593Smuzhiyun break;
4664*4882a593Smuzhiyun
4665*4882a593Smuzhiyun memset(instance->local_pd_list, 0,
4666*4882a593Smuzhiyun MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4667*4882a593Smuzhiyun
4668*4882a593Smuzhiyun for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4669*4882a593Smuzhiyun instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
4670*4882a593Smuzhiyun le16_to_cpu(pd_addr->deviceId);
4671*4882a593Smuzhiyun instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
4672*4882a593Smuzhiyun pd_addr->scsiDevType;
4673*4882a593Smuzhiyun instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
4674*4882a593Smuzhiyun MR_PD_STATE_SYSTEM;
4675*4882a593Smuzhiyun if (megasas_dbg_lvl & LD_PD_DEBUG)
4676*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
4677*4882a593Smuzhiyun "PD%d: targetID: 0x%03x deviceType:0x%x\n",
4678*4882a593Smuzhiyun pd_index, le16_to_cpu(pd_addr->deviceId),
4679*4882a593Smuzhiyun pd_addr->scsiDevType);
4680*4882a593Smuzhiyun pd_addr++;
4681*4882a593Smuzhiyun }
4682*4882a593Smuzhiyun
4683*4882a593Smuzhiyun memcpy(instance->pd_list, instance->local_pd_list,
4684*4882a593Smuzhiyun sizeof(instance->pd_list));
4685*4882a593Smuzhiyun break;
4686*4882a593Smuzhiyun
4687*4882a593Smuzhiyun }
4688*4882a593Smuzhiyun
4689*4882a593Smuzhiyun if (ret != DCMD_TIMEOUT)
4690*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
4691*4882a593Smuzhiyun
4692*4882a593Smuzhiyun return ret;
4693*4882a593Smuzhiyun }
4694*4882a593Smuzhiyun
4695*4882a593Smuzhiyun /*
4696*4882a593Smuzhiyun * megasas_get_ld_list - Returns FW's ld_list structure
4697*4882a593Smuzhiyun * @instance: Adapter soft state
4698*4882a593Smuzhiyun *
4699*4882a593Smuzhiyun * Issues an internal command (DCMD) to get the FW's controller LD
4700*4882a593Smuzhiyun * list structure. This information is mainly used to find out the
4701*4882a593Smuzhiyun * logical drives (LDs) exported by the FW and their associated
4702*4882a593Smuzhiyun * target IDs.
4703*4882a593Smuzhiyun */
4704*4882a593Smuzhiyun static int
4705*4882a593Smuzhiyun megasas_get_ld_list(struct megasas_instance *instance)
4706*4882a593Smuzhiyun {
4707*4882a593Smuzhiyun int ret = 0, ld_index = 0, ids = 0;
4708*4882a593Smuzhiyun struct megasas_cmd *cmd;
4709*4882a593Smuzhiyun struct megasas_dcmd_frame *dcmd;
4710*4882a593Smuzhiyun struct MR_LD_LIST *ci;
4711*4882a593Smuzhiyun dma_addr_t ci_h = 0;
4712*4882a593Smuzhiyun u32 ld_count;
4713*4882a593Smuzhiyun
4714*4882a593Smuzhiyun ci = instance->ld_list_buf;
4715*4882a593Smuzhiyun ci_h = instance->ld_list_buf_h;
4716*4882a593Smuzhiyun
4717*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
4718*4882a593Smuzhiyun
4719*4882a593Smuzhiyun if (!cmd) {
4720*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4721*4882a593Smuzhiyun return -ENOMEM;
4722*4882a593Smuzhiyun }
4723*4882a593Smuzhiyun
4724*4882a593Smuzhiyun dcmd = &cmd->frame->dcmd;
4725*4882a593Smuzhiyun
4726*4882a593Smuzhiyun memset(ci, 0, sizeof(*ci));
4727*4882a593Smuzhiyun memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4728*4882a593Smuzhiyun
4729*4882a593Smuzhiyun if (instance->supportmax256vd)
4730*4882a593Smuzhiyun dcmd->mbox.b[0] = 1;
4731*4882a593Smuzhiyun dcmd->cmd = MFI_CMD_DCMD;
4732*4882a593Smuzhiyun dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4733*4882a593Smuzhiyun dcmd->sge_count = 1;
4734*4882a593Smuzhiyun dcmd->flags = MFI_FRAME_DIR_READ;
4735*4882a593Smuzhiyun dcmd->timeout = 0;
4736*4882a593Smuzhiyun dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4737*4882a593Smuzhiyun dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4738*4882a593Smuzhiyun dcmd->pad_0 = 0;
4739*4882a593Smuzhiyun
4740*4882a593Smuzhiyun megasas_set_dma_settings(instance, dcmd, ci_h,
4741*4882a593Smuzhiyun sizeof(struct MR_LD_LIST));
4742*4882a593Smuzhiyun
4743*4882a593Smuzhiyun if ((instance->adapter_type != MFI_SERIES) &&
4744*4882a593Smuzhiyun !instance->mask_interrupts)
4745*4882a593Smuzhiyun ret = megasas_issue_blocked_cmd(instance, cmd,
4746*4882a593Smuzhiyun MFI_IO_TIMEOUT_SECS);
4747*4882a593Smuzhiyun else
4748*4882a593Smuzhiyun ret = megasas_issue_polled(instance, cmd);
4749*4882a593Smuzhiyun
4750*4882a593Smuzhiyun ld_count = le32_to_cpu(ci->ldCount);
4751*4882a593Smuzhiyun
4752*4882a593Smuzhiyun switch (ret) {
4753*4882a593Smuzhiyun case DCMD_FAILED:
4754*4882a593Smuzhiyun megaraid_sas_kill_hba(instance);
4755*4882a593Smuzhiyun break;
4756*4882a593Smuzhiyun case DCMD_TIMEOUT:
4757*4882a593Smuzhiyun
4758*4882a593Smuzhiyun switch (dcmd_timeout_ocr_possible(instance)) {
4759*4882a593Smuzhiyun case INITIATE_OCR:
4760*4882a593Smuzhiyun cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4761*4882a593Smuzhiyun /*
4762*4882a593Smuzhiyun * DCMD timed out from the AEN path.
4763*4882a593Smuzhiyun * The AEN path already holds reset_mutex to avoid PCI access
4764*4882a593Smuzhiyun * while OCR is in progress.
4765*4882a593Smuzhiyun */
4766*4882a593Smuzhiyun mutex_unlock(&instance->reset_mutex);
4767*4882a593Smuzhiyun megasas_reset_fusion(instance->host,
4768*4882a593Smuzhiyun MFI_IO_TIMEOUT_OCR);
4769*4882a593Smuzhiyun mutex_lock(&instance->reset_mutex);
4770*4882a593Smuzhiyun break;
4771*4882a593Smuzhiyun case KILL_ADAPTER:
4772*4882a593Smuzhiyun megaraid_sas_kill_hba(instance);
4773*4882a593Smuzhiyun break;
4774*4882a593Smuzhiyun case IGNORE_TIMEOUT:
4775*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4776*4882a593Smuzhiyun __func__, __LINE__);
4777*4882a593Smuzhiyun break;
4778*4882a593Smuzhiyun }
4779*4882a593Smuzhiyun
4780*4882a593Smuzhiyun break;
4781*4882a593Smuzhiyun
4782*4882a593Smuzhiyun case DCMD_SUCCESS:
4783*4882a593Smuzhiyun if (megasas_dbg_lvl & LD_PD_DEBUG)
4784*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4785*4882a593Smuzhiyun __func__, ld_count);
4786*4882a593Smuzhiyun
4787*4882a593Smuzhiyun if (ld_count > instance->fw_supported_vd_count)
4788*4882a593Smuzhiyun break;
4789*4882a593Smuzhiyun
4790*4882a593Smuzhiyun memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4791*4882a593Smuzhiyun
4792*4882a593Smuzhiyun for (ld_index = 0; ld_index < ld_count; ld_index++) {
4793*4882a593Smuzhiyun if (ci->ldList[ld_index].state != 0) {
4794*4882a593Smuzhiyun ids = ci->ldList[ld_index].ref.targetId;
4795*4882a593Smuzhiyun instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4796*4882a593Smuzhiyun if (megasas_dbg_lvl & LD_PD_DEBUG)
4797*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
4798*4882a593Smuzhiyun "LD%d: targetID: 0x%03x\n",
4799*4882a593Smuzhiyun ld_index, ids);
4800*4882a593Smuzhiyun }
4801*4882a593Smuzhiyun }
4802*4882a593Smuzhiyun
4803*4882a593Smuzhiyun break;
4804*4882a593Smuzhiyun }
4805*4882a593Smuzhiyun
4806*4882a593Smuzhiyun if (ret != DCMD_TIMEOUT)
4807*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
4808*4882a593Smuzhiyun
4809*4882a593Smuzhiyun return ret;
4810*4882a593Smuzhiyun }
4811*4882a593Smuzhiyun
4812*4882a593Smuzhiyun /**
4813*4882a593Smuzhiyun * megasas_ld_list_query - Returns FW's ld_list structure
4814*4882a593Smuzhiyun * @instance: Adapter soft state
4815*4882a593Smuzhiyun * @query_type: ld_list structure type
4816*4882a593Smuzhiyun *
4817*4882a593Smuzhiyun * Issues an internal command (DCMD) to get the FW's LD target-ID
4818*4882a593Smuzhiyun * list structure. This information is mainly used to find out the
4819*4882a593Smuzhiyun * logical drives of the given query type supported by the FW.
4820*4882a593Smuzhiyun */
4821*4882a593Smuzhiyun static int
4822*4882a593Smuzhiyun megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4823*4882a593Smuzhiyun {
4824*4882a593Smuzhiyun int ret = 0, ld_index = 0, ids = 0;
4825*4882a593Smuzhiyun struct megasas_cmd *cmd;
4826*4882a593Smuzhiyun struct megasas_dcmd_frame *dcmd;
4827*4882a593Smuzhiyun struct MR_LD_TARGETID_LIST *ci;
4828*4882a593Smuzhiyun dma_addr_t ci_h = 0;
4829*4882a593Smuzhiyun u32 tgtid_count;
4830*4882a593Smuzhiyun
4831*4882a593Smuzhiyun ci = instance->ld_targetid_list_buf;
4832*4882a593Smuzhiyun ci_h = instance->ld_targetid_list_buf_h;
4833*4882a593Smuzhiyun
4834*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
4835*4882a593Smuzhiyun
4836*4882a593Smuzhiyun if (!cmd) {
4837*4882a593Smuzhiyun dev_warn(&instance->pdev->dev,
4838*4882a593Smuzhiyun "megasas_ld_list_query: Failed to get cmd\n");
4839*4882a593Smuzhiyun return -ENOMEM;
4840*4882a593Smuzhiyun }
4841*4882a593Smuzhiyun
4842*4882a593Smuzhiyun dcmd = &cmd->frame->dcmd;
4843*4882a593Smuzhiyun
4844*4882a593Smuzhiyun memset(ci, 0, sizeof(*ci));
4845*4882a593Smuzhiyun memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4846*4882a593Smuzhiyun
4847*4882a593Smuzhiyun dcmd->mbox.b[0] = query_type;
4848*4882a593Smuzhiyun if (instance->supportmax256vd)
4849*4882a593Smuzhiyun dcmd->mbox.b[2] = 1;
4850*4882a593Smuzhiyun
4851*4882a593Smuzhiyun dcmd->cmd = MFI_CMD_DCMD;
4852*4882a593Smuzhiyun dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4853*4882a593Smuzhiyun dcmd->sge_count = 1;
4854*4882a593Smuzhiyun dcmd->flags = MFI_FRAME_DIR_READ;
4855*4882a593Smuzhiyun dcmd->timeout = 0;
4856*4882a593Smuzhiyun dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4857*4882a593Smuzhiyun dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4858*4882a593Smuzhiyun dcmd->pad_0 = 0;
4859*4882a593Smuzhiyun
4860*4882a593Smuzhiyun megasas_set_dma_settings(instance, dcmd, ci_h,
4861*4882a593Smuzhiyun sizeof(struct MR_LD_TARGETID_LIST));
4862*4882a593Smuzhiyun
4863*4882a593Smuzhiyun if ((instance->adapter_type != MFI_SERIES) &&
4864*4882a593Smuzhiyun !instance->mask_interrupts)
4865*4882a593Smuzhiyun ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4866*4882a593Smuzhiyun else
4867*4882a593Smuzhiyun ret = megasas_issue_polled(instance, cmd);
4868*4882a593Smuzhiyun
4869*4882a593Smuzhiyun switch (ret) {
4870*4882a593Smuzhiyun case DCMD_FAILED:
4871*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
4872*4882a593Smuzhiyun "DCMD not supported by firmware - %s %d\n",
4873*4882a593Smuzhiyun __func__, __LINE__);
4874*4882a593Smuzhiyun ret = megasas_get_ld_list(instance);
4875*4882a593Smuzhiyun break;
4876*4882a593Smuzhiyun case DCMD_TIMEOUT:
4877*4882a593Smuzhiyun switch (dcmd_timeout_ocr_possible(instance)) {
4878*4882a593Smuzhiyun case INITIATE_OCR:
4879*4882a593Smuzhiyun cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4880*4882a593Smuzhiyun /*
4881*4882a593Smuzhiyun * DCMD timed out from the AEN path.
4882*4882a593Smuzhiyun * The AEN path already holds reset_mutex to avoid PCI access
4883*4882a593Smuzhiyun * while OCR is in progress.
4884*4882a593Smuzhiyun */
4885*4882a593Smuzhiyun mutex_unlock(&instance->reset_mutex);
4886*4882a593Smuzhiyun megasas_reset_fusion(instance->host,
4887*4882a593Smuzhiyun MFI_IO_TIMEOUT_OCR);
4888*4882a593Smuzhiyun mutex_lock(&instance->reset_mutex);
4889*4882a593Smuzhiyun break;
4890*4882a593Smuzhiyun case KILL_ADAPTER:
4891*4882a593Smuzhiyun megaraid_sas_kill_hba(instance);
4892*4882a593Smuzhiyun break;
4893*4882a593Smuzhiyun case IGNORE_TIMEOUT:
4894*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4895*4882a593Smuzhiyun __func__, __LINE__);
4896*4882a593Smuzhiyun break;
4897*4882a593Smuzhiyun }
4898*4882a593Smuzhiyun
4899*4882a593Smuzhiyun break;
4900*4882a593Smuzhiyun case DCMD_SUCCESS:
4901*4882a593Smuzhiyun tgtid_count = le32_to_cpu(ci->count);
4902*4882a593Smuzhiyun
4903*4882a593Smuzhiyun if (megasas_dbg_lvl & LD_PD_DEBUG)
4904*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4905*4882a593Smuzhiyun __func__, tgtid_count);
4906*4882a593Smuzhiyun
4907*4882a593Smuzhiyun if ((tgtid_count > (instance->fw_supported_vd_count)))
4908*4882a593Smuzhiyun break;
4909*4882a593Smuzhiyun
4910*4882a593Smuzhiyun memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4911*4882a593Smuzhiyun for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4912*4882a593Smuzhiyun ids = ci->targetId[ld_index];
4913*4882a593Smuzhiyun instance->ld_ids[ids] = ci->targetId[ld_index];
4914*4882a593Smuzhiyun if (megasas_dbg_lvl & LD_PD_DEBUG)
4915*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n",
4916*4882a593Smuzhiyun ld_index, ci->targetId[ld_index]);
4917*4882a593Smuzhiyun }
4918*4882a593Smuzhiyun
4919*4882a593Smuzhiyun break;
4920*4882a593Smuzhiyun }
4921*4882a593Smuzhiyun
4922*4882a593Smuzhiyun if (ret != DCMD_TIMEOUT)
4923*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
4924*4882a593Smuzhiyun
4925*4882a593Smuzhiyun return ret;
4926*4882a593Smuzhiyun }
4927*4882a593Smuzhiyun
4928*4882a593Smuzhiyun /**
4929*4882a593Smuzhiyun * megasas_host_device_list_query - Returns the combined device (PD + LD) list
4930*4882a593Smuzhiyun * @instance: Adapter soft state
4931*4882a593Smuzhiyun * @is_probe: Driver probe check
4932*4882a593Smuzhiyun *
4933*4882a593Smuzhiyun * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET
4934*4882a593Smuzhiyun * dcmd.mbox - reserved
4935*4882a593Smuzhiyun * dcmd.sge IN - ptr to the returned MR_HOST_DEVICE_LIST structure
4936*4882a593Smuzhiyun * Desc: This DCMD returns the combined device list
4937*4882a593Smuzhiyun * Status: MFI_STAT_OK - list returned successfully
4938*4882a593Smuzhiyun * MFI_STAT_INVALID_CMD - firmware support for the feature has been disabled
4939*4882a593Smuzhiyun * Return: 0 if the DCMD succeeded, non-zero otherwise
4940*4882a593Smuzhiyun */
4941*4882a593Smuzhiyun static int
4942*4882a593Smuzhiyun megasas_host_device_list_query(struct megasas_instance *instance,
4943*4882a593Smuzhiyun bool is_probe)
4944*4882a593Smuzhiyun {
4945*4882a593Smuzhiyun int ret, i, target_id;
4946*4882a593Smuzhiyun struct megasas_cmd *cmd;
4947*4882a593Smuzhiyun struct megasas_dcmd_frame *dcmd;
4948*4882a593Smuzhiyun struct MR_HOST_DEVICE_LIST *ci;
4949*4882a593Smuzhiyun u32 count;
4950*4882a593Smuzhiyun dma_addr_t ci_h;
4951*4882a593Smuzhiyun
4952*4882a593Smuzhiyun ci = instance->host_device_list_buf;
4953*4882a593Smuzhiyun ci_h = instance->host_device_list_buf_h;
4954*4882a593Smuzhiyun
4955*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
4956*4882a593Smuzhiyun
4957*4882a593Smuzhiyun if (!cmd) {
4958*4882a593Smuzhiyun dev_warn(&instance->pdev->dev,
4959*4882a593Smuzhiyun "%s: failed to get cmd\n",
4960*4882a593Smuzhiyun __func__);
4961*4882a593Smuzhiyun return -ENOMEM;
4962*4882a593Smuzhiyun }
4963*4882a593Smuzhiyun
4964*4882a593Smuzhiyun dcmd = &cmd->frame->dcmd;
4965*4882a593Smuzhiyun
4966*4882a593Smuzhiyun memset(ci, 0, sizeof(*ci));
4967*4882a593Smuzhiyun memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4968*4882a593Smuzhiyun
4969*4882a593Smuzhiyun dcmd->mbox.b[0] = is_probe ? 0 : 1;
4970*4882a593Smuzhiyun dcmd->cmd = MFI_CMD_DCMD;
4971*4882a593Smuzhiyun dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4972*4882a593Smuzhiyun dcmd->sge_count = 1;
4973*4882a593Smuzhiyun dcmd->flags = MFI_FRAME_DIR_READ;
4974*4882a593Smuzhiyun dcmd->timeout = 0;
4975*4882a593Smuzhiyun dcmd->pad_0 = 0;
4976*4882a593Smuzhiyun dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ);
4977*4882a593Smuzhiyun dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET);
4978*4882a593Smuzhiyun
4979*4882a593Smuzhiyun megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ);
4980*4882a593Smuzhiyun
4981*4882a593Smuzhiyun if (!instance->mask_interrupts) {
4982*4882a593Smuzhiyun ret = megasas_issue_blocked_cmd(instance, cmd,
4983*4882a593Smuzhiyun MFI_IO_TIMEOUT_SECS);
4984*4882a593Smuzhiyun } else {
4985*4882a593Smuzhiyun ret = megasas_issue_polled(instance, cmd);
4986*4882a593Smuzhiyun cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4987*4882a593Smuzhiyun }
4988*4882a593Smuzhiyun
4989*4882a593Smuzhiyun switch (ret) {
4990*4882a593Smuzhiyun case DCMD_SUCCESS:
4991*4882a593Smuzhiyun /* Fill the internal pd_list and ld_ids array based on
4992*4882a593Smuzhiyun * targetIds returned by FW
4993*4882a593Smuzhiyun */
4994*4882a593Smuzhiyun count = le32_to_cpu(ci->count);
4995*4882a593Smuzhiyun
4996*4882a593Smuzhiyun if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))
4997*4882a593Smuzhiyun break;
4998*4882a593Smuzhiyun
4999*4882a593Smuzhiyun if (megasas_dbg_lvl & LD_PD_DEBUG)
5000*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n",
5001*4882a593Smuzhiyun __func__, count);
5002*4882a593Smuzhiyun
5003*4882a593Smuzhiyun memset(instance->local_pd_list, 0,
5004*4882a593Smuzhiyun MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
5005*4882a593Smuzhiyun memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
5006*4882a593Smuzhiyun for (i = 0; i < count; i++) {
5007*4882a593Smuzhiyun target_id = le16_to_cpu(ci->host_device_list[i].target_id);
5008*4882a593Smuzhiyun if (ci->host_device_list[i].flags.u.bits.is_sys_pd) {
5009*4882a593Smuzhiyun instance->local_pd_list[target_id].tid = target_id;
5010*4882a593Smuzhiyun instance->local_pd_list[target_id].driveType =
5011*4882a593Smuzhiyun ci->host_device_list[i].scsi_type;
5012*4882a593Smuzhiyun instance->local_pd_list[target_id].driveState =
5013*4882a593Smuzhiyun MR_PD_STATE_SYSTEM;
5014*4882a593Smuzhiyun if (megasas_dbg_lvl & LD_PD_DEBUG)
5015*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
5016*4882a593Smuzhiyun "Device %d: PD targetID: 0x%03x deviceType:0x%x\n",
5017*4882a593Smuzhiyun i, target_id, ci->host_device_list[i].scsi_type);
5018*4882a593Smuzhiyun } else {
5019*4882a593Smuzhiyun instance->ld_ids[target_id] = target_id;
5020*4882a593Smuzhiyun if (megasas_dbg_lvl & LD_PD_DEBUG)
5021*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
5022*4882a593Smuzhiyun "Device %d: LD targetID: 0x%03x\n",
5023*4882a593Smuzhiyun i, target_id);
5024*4882a593Smuzhiyun }
5025*4882a593Smuzhiyun }
5026*4882a593Smuzhiyun
5027*4882a593Smuzhiyun memcpy(instance->pd_list, instance->local_pd_list,
5028*4882a593Smuzhiyun sizeof(instance->pd_list));
5029*4882a593Smuzhiyun break;
5030*4882a593Smuzhiyun
5031*4882a593Smuzhiyun case DCMD_TIMEOUT:
5032*4882a593Smuzhiyun switch (dcmd_timeout_ocr_possible(instance)) {
5033*4882a593Smuzhiyun case INITIATE_OCR:
5034*4882a593Smuzhiyun cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5035*4882a593Smuzhiyun mutex_unlock(&instance->reset_mutex);
5036*4882a593Smuzhiyun megasas_reset_fusion(instance->host,
5037*4882a593Smuzhiyun MFI_IO_TIMEOUT_OCR);
5038*4882a593Smuzhiyun mutex_lock(&instance->reset_mutex);
5039*4882a593Smuzhiyun break;
5040*4882a593Smuzhiyun case KILL_ADAPTER:
5041*4882a593Smuzhiyun megaraid_sas_kill_hba(instance);
5042*4882a593Smuzhiyun break;
5043*4882a593Smuzhiyun case IGNORE_TIMEOUT:
5044*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5045*4882a593Smuzhiyun __func__, __LINE__);
5046*4882a593Smuzhiyun break;
5047*4882a593Smuzhiyun }
5048*4882a593Smuzhiyun break;
5049*4882a593Smuzhiyun case DCMD_FAILED:
5050*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
5051*4882a593Smuzhiyun "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n",
5052*4882a593Smuzhiyun __func__);
5053*4882a593Smuzhiyun break;
5054*4882a593Smuzhiyun }
5055*4882a593Smuzhiyun
5056*4882a593Smuzhiyun if (ret != DCMD_TIMEOUT)
5057*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
5058*4882a593Smuzhiyun
5059*4882a593Smuzhiyun return ret;
5060*4882a593Smuzhiyun }
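/*
 * Illustrative sketch (not part of the driver build): how a combined
 * device list is split into PD and LD tables keyed by target_id, using
 * simplified stand-in structures instead of MR_HOST_DEVICE_LIST.
 */
#if 0
#include <stdio.h>
#include <string.h>

struct demo_dev { unsigned short target_id; int is_sys_pd; };

int main(void)
{
	struct demo_dev list[] = { { 0, 1 }, { 3, 1 }, { 10, 0 } };
	unsigned char pd_state[16], ld_ids[16];
	unsigned int i;

	memset(pd_state, 0, sizeof(pd_state));
	memset(ld_ids, 0xff, sizeof(ld_ids));	/* 0xff marks an unused LD slot */

	for (i = 0; i < sizeof(list) / sizeof(list[0]); i++) {
		if (list[i].is_sys_pd)
			pd_state[list[i].target_id] = 1;	/* system PD */
		else
			ld_ids[list[i].target_id] = list[i].target_id;
	}
	printf("LD at target 10: %u\n", (unsigned int)ld_ids[10]);	/* prints 10 */
	return 0;
}
#endif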
5061*4882a593Smuzhiyun
5062*4882a593Smuzhiyun /*
5063*4882a593Smuzhiyun * megasas_update_ext_vd_details : Update details w.r.t Extended VD
5064*4882a593Smuzhiyun * instance : Controller's instance
5065*4882a593Smuzhiyun */
5066*4882a593Smuzhiyun static void megasas_update_ext_vd_details(struct megasas_instance *instance)
5067*4882a593Smuzhiyun {
5068*4882a593Smuzhiyun struct fusion_context *fusion;
5069*4882a593Smuzhiyun u32 ventura_map_sz = 0;
5070*4882a593Smuzhiyun
5071*4882a593Smuzhiyun fusion = instance->ctrl_context;
5072*4882a593Smuzhiyun /* Nothing to do for MFI-based controllers */
5073*4882a593Smuzhiyun if (!fusion)
5074*4882a593Smuzhiyun return;
5075*4882a593Smuzhiyun
5076*4882a593Smuzhiyun instance->supportmax256vd =
5077*4882a593Smuzhiyun instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
5078*4882a593Smuzhiyun /* Below is additional check to address future FW enhancement */
5079*4882a593Smuzhiyun if (instance->ctrl_info_buf->max_lds > 64)
5080*4882a593Smuzhiyun instance->supportmax256vd = 1;
5081*4882a593Smuzhiyun
5082*4882a593Smuzhiyun instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
5083*4882a593Smuzhiyun * MEGASAS_MAX_DEV_PER_CHANNEL;
5084*4882a593Smuzhiyun instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
5085*4882a593Smuzhiyun * MEGASAS_MAX_DEV_PER_CHANNEL;
5086*4882a593Smuzhiyun if (instance->supportmax256vd) {
5087*4882a593Smuzhiyun instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
5088*4882a593Smuzhiyun instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5089*4882a593Smuzhiyun } else {
5090*4882a593Smuzhiyun instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5091*4882a593Smuzhiyun instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5092*4882a593Smuzhiyun }
5093*4882a593Smuzhiyun
5094*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
5095*4882a593Smuzhiyun "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
5096*4882a593Smuzhiyun instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
5097*4882a593Smuzhiyun instance->ctrl_info_buf->max_lds);
5098*4882a593Smuzhiyun
5099*4882a593Smuzhiyun if (instance->max_raid_mapsize) {
5100*4882a593Smuzhiyun ventura_map_sz = instance->max_raid_mapsize *
5101*4882a593Smuzhiyun MR_MIN_MAP_SIZE; /* 64k */
5102*4882a593Smuzhiyun fusion->current_map_sz = ventura_map_sz;
5103*4882a593Smuzhiyun fusion->max_map_sz = ventura_map_sz;
5104*4882a593Smuzhiyun } else {
5105*4882a593Smuzhiyun fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
5106*4882a593Smuzhiyun (sizeof(struct MR_LD_SPAN_MAP) *
5107*4882a593Smuzhiyun (instance->fw_supported_vd_count - 1));
5108*4882a593Smuzhiyun fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
5109*4882a593Smuzhiyun
5110*4882a593Smuzhiyun fusion->max_map_sz =
5111*4882a593Smuzhiyun max(fusion->old_map_sz, fusion->new_map_sz);
5112*4882a593Smuzhiyun
5113*4882a593Smuzhiyun if (instance->supportmax256vd)
5114*4882a593Smuzhiyun fusion->current_map_sz = fusion->new_map_sz;
5115*4882a593Smuzhiyun else
5116*4882a593Smuzhiyun fusion->current_map_sz = fusion->old_map_sz;
5117*4882a593Smuzhiyun }
5118*4882a593Smuzhiyun /* irrespective of FW raid maps, driver raid map is constant */
5119*4882a593Smuzhiyun fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
5120*4882a593Smuzhiyun }
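/*
 * Illustrative sketch (not part of the driver build): the map-size
 * selection above, with made-up structure sizes. Only the arithmetic is
 * shown; the real sizes come from MR_FW_RAID_MAP/MR_LD_SPAN_MAP and the
 * 64 KB MR_MIN_MAP_SIZE unit.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int base_map = 1024, span_map = 128;	/* assumed stand-in sizes */
	unsigned int vd_count = 64, max_raid_mapsize = 4;
	unsigned int old_sz, ventura_sz;

	old_sz = base_map + span_map * (vd_count - 1);	/* legacy map layout */
	ventura_sz = max_raid_mapsize * (64 * 1024);	/* map size in 64 KB units */

	printf("old map: %u bytes, ventura map: %u bytes\n", old_sz, ventura_sz);
	return 0;	/* 1024 + 128*63 = 9088; 4 * 65536 = 262144 */
}
#endif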
5121*4882a593Smuzhiyun
5122*4882a593Smuzhiyun /*
5123*4882a593Smuzhiyun * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
5124*4882a593Smuzhiyun * dcmd.hdr.length - number of bytes to read
5125*4882a593Smuzhiyun * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES
5126*4882a593Smuzhiyun * Desc: Fill in snapdump properties
5127*4882a593Smuzhiyun * Status: MFI_STAT_OK- Command successful
5128*4882a593Smuzhiyun */
5129*4882a593Smuzhiyun void megasas_get_snapdump_properties(struct megasas_instance *instance)
5130*4882a593Smuzhiyun {
5131*4882a593Smuzhiyun int ret = 0;
5132*4882a593Smuzhiyun struct megasas_cmd *cmd;
5133*4882a593Smuzhiyun struct megasas_dcmd_frame *dcmd;
5134*4882a593Smuzhiyun struct MR_SNAPDUMP_PROPERTIES *ci;
5135*4882a593Smuzhiyun dma_addr_t ci_h = 0;
5136*4882a593Smuzhiyun
5137*4882a593Smuzhiyun ci = instance->snapdump_prop;
5138*4882a593Smuzhiyun ci_h = instance->snapdump_prop_h;
5139*4882a593Smuzhiyun
5140*4882a593Smuzhiyun if (!ci)
5141*4882a593Smuzhiyun return;
5142*4882a593Smuzhiyun
5143*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
5144*4882a593Smuzhiyun
5145*4882a593Smuzhiyun if (!cmd) {
5146*4882a593Smuzhiyun dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
5147*4882a593Smuzhiyun return;
5148*4882a593Smuzhiyun }
5149*4882a593Smuzhiyun
5150*4882a593Smuzhiyun dcmd = &cmd->frame->dcmd;
5151*4882a593Smuzhiyun
5152*4882a593Smuzhiyun memset(ci, 0, sizeof(*ci));
5153*4882a593Smuzhiyun memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5154*4882a593Smuzhiyun
5155*4882a593Smuzhiyun dcmd->cmd = MFI_CMD_DCMD;
5156*4882a593Smuzhiyun dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5157*4882a593Smuzhiyun dcmd->sge_count = 1;
5158*4882a593Smuzhiyun dcmd->flags = MFI_FRAME_DIR_READ;
5159*4882a593Smuzhiyun dcmd->timeout = 0;
5160*4882a593Smuzhiyun dcmd->pad_0 = 0;
5161*4882a593Smuzhiyun dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
5162*4882a593Smuzhiyun dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
5163*4882a593Smuzhiyun
5164*4882a593Smuzhiyun megasas_set_dma_settings(instance, dcmd, ci_h,
5165*4882a593Smuzhiyun sizeof(struct MR_SNAPDUMP_PROPERTIES));
5166*4882a593Smuzhiyun
5167*4882a593Smuzhiyun if (!instance->mask_interrupts) {
5168*4882a593Smuzhiyun ret = megasas_issue_blocked_cmd(instance, cmd,
5169*4882a593Smuzhiyun MFI_IO_TIMEOUT_SECS);
5170*4882a593Smuzhiyun } else {
5171*4882a593Smuzhiyun ret = megasas_issue_polled(instance, cmd);
5172*4882a593Smuzhiyun cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5173*4882a593Smuzhiyun }
5174*4882a593Smuzhiyun
5175*4882a593Smuzhiyun switch (ret) {
5176*4882a593Smuzhiyun case DCMD_SUCCESS:
5177*4882a593Smuzhiyun instance->snapdump_wait_time =
5178*4882a593Smuzhiyun min_t(u8, ci->trigger_min_num_sec_before_ocr,
5179*4882a593Smuzhiyun MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
5180*4882a593Smuzhiyun break;
5181*4882a593Smuzhiyun
5182*4882a593Smuzhiyun case DCMD_TIMEOUT:
5183*4882a593Smuzhiyun switch (dcmd_timeout_ocr_possible(instance)) {
5184*4882a593Smuzhiyun case INITIATE_OCR:
5185*4882a593Smuzhiyun cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5186*4882a593Smuzhiyun mutex_unlock(&instance->reset_mutex);
5187*4882a593Smuzhiyun megasas_reset_fusion(instance->host,
5188*4882a593Smuzhiyun MFI_IO_TIMEOUT_OCR);
5189*4882a593Smuzhiyun mutex_lock(&instance->reset_mutex);
5190*4882a593Smuzhiyun break;
5191*4882a593Smuzhiyun case KILL_ADAPTER:
5192*4882a593Smuzhiyun megaraid_sas_kill_hba(instance);
5193*4882a593Smuzhiyun break;
5194*4882a593Smuzhiyun case IGNORE_TIMEOUT:
5195*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5196*4882a593Smuzhiyun __func__, __LINE__);
5197*4882a593Smuzhiyun break;
5198*4882a593Smuzhiyun }
5199*4882a593Smuzhiyun }
5200*4882a593Smuzhiyun
5201*4882a593Smuzhiyun if (ret != DCMD_TIMEOUT)
5202*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
5203*4882a593Smuzhiyun }
5204*4882a593Smuzhiyun
5205*4882a593Smuzhiyun /**
5206*4882a593Smuzhiyun * megasas_get_controller_info - Returns FW's controller structure
5207*4882a593Smuzhiyun * @instance: Adapter soft state
5208*4882a593Smuzhiyun *
5209*4882a593Smuzhiyun * Issues an internal command (DCMD) to get the FW's controller structure.
5210*4882a593Smuzhiyun * This information is mainly used to find out the maximum IO transfer per
5211*4882a593Smuzhiyun * command supported by the FW.
5212*4882a593Smuzhiyun */
5213*4882a593Smuzhiyun int
5214*4882a593Smuzhiyun megasas_get_ctrl_info(struct megasas_instance *instance)
5215*4882a593Smuzhiyun {
5216*4882a593Smuzhiyun int ret = 0;
5217*4882a593Smuzhiyun struct megasas_cmd *cmd;
5218*4882a593Smuzhiyun struct megasas_dcmd_frame *dcmd;
5219*4882a593Smuzhiyun struct megasas_ctrl_info *ci;
5220*4882a593Smuzhiyun dma_addr_t ci_h = 0;
5221*4882a593Smuzhiyun
5222*4882a593Smuzhiyun ci = instance->ctrl_info_buf;
5223*4882a593Smuzhiyun ci_h = instance->ctrl_info_buf_h;
5224*4882a593Smuzhiyun
5225*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
5226*4882a593Smuzhiyun
5227*4882a593Smuzhiyun if (!cmd) {
5228*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
5229*4882a593Smuzhiyun return -ENOMEM;
5230*4882a593Smuzhiyun }
5231*4882a593Smuzhiyun
5232*4882a593Smuzhiyun dcmd = &cmd->frame->dcmd;
5233*4882a593Smuzhiyun
5234*4882a593Smuzhiyun memset(ci, 0, sizeof(*ci));
5235*4882a593Smuzhiyun memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5236*4882a593Smuzhiyun
5237*4882a593Smuzhiyun dcmd->cmd = MFI_CMD_DCMD;
5238*4882a593Smuzhiyun dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5239*4882a593Smuzhiyun dcmd->sge_count = 1;
5240*4882a593Smuzhiyun dcmd->flags = MFI_FRAME_DIR_READ;
5241*4882a593Smuzhiyun dcmd->timeout = 0;
5242*4882a593Smuzhiyun dcmd->pad_0 = 0;
5243*4882a593Smuzhiyun dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
5244*4882a593Smuzhiyun dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
5245*4882a593Smuzhiyun dcmd->mbox.b[0] = 1;
5246*4882a593Smuzhiyun
5247*4882a593Smuzhiyun megasas_set_dma_settings(instance, dcmd, ci_h,
5248*4882a593Smuzhiyun sizeof(struct megasas_ctrl_info));
5249*4882a593Smuzhiyun
5250*4882a593Smuzhiyun if ((instance->adapter_type != MFI_SERIES) &&
5251*4882a593Smuzhiyun !instance->mask_interrupts) {
5252*4882a593Smuzhiyun ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5253*4882a593Smuzhiyun } else {
5254*4882a593Smuzhiyun ret = megasas_issue_polled(instance, cmd);
5255*4882a593Smuzhiyun cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5256*4882a593Smuzhiyun }
5257*4882a593Smuzhiyun
5258*4882a593Smuzhiyun switch (ret) {
5259*4882a593Smuzhiyun case DCMD_SUCCESS:
5260*4882a593Smuzhiyun /* Save required controller information in
5261*4882a593Smuzhiyun * CPU endianness format.
5262*4882a593Smuzhiyun */
5263*4882a593Smuzhiyun le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
5264*4882a593Smuzhiyun le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
5265*4882a593Smuzhiyun le32_to_cpus((u32 *)&ci->adapterOperations2);
5266*4882a593Smuzhiyun le32_to_cpus((u32 *)&ci->adapterOperations3);
5267*4882a593Smuzhiyun le16_to_cpus((u16 *)&ci->adapter_operations4);
5268*4882a593Smuzhiyun le32_to_cpus((u32 *)&ci->adapter_operations5);
5269*4882a593Smuzhiyun
5270*4882a593Smuzhiyun /* Update the latest Ext VD info.
5271*4882a593Smuzhiyun * From Init path, store current firmware details.
5272*4882a593Smuzhiyun * From the OCR path, detect any firmware property changes,
5273*4882a593Smuzhiyun * e.g. in case of a firmware upgrade without a system reboot.
5274*4882a593Smuzhiyun */
5275*4882a593Smuzhiyun megasas_update_ext_vd_details(instance);
5276*4882a593Smuzhiyun instance->support_seqnum_jbod_fp =
5277*4882a593Smuzhiyun ci->adapterOperations3.useSeqNumJbodFP;
5278*4882a593Smuzhiyun instance->support_morethan256jbod =
5279*4882a593Smuzhiyun ci->adapter_operations4.support_pd_map_target_id;
5280*4882a593Smuzhiyun instance->support_nvme_passthru =
5281*4882a593Smuzhiyun ci->adapter_operations4.support_nvme_passthru;
5282*4882a593Smuzhiyun instance->support_pci_lane_margining =
5283*4882a593Smuzhiyun ci->adapter_operations5.support_pci_lane_margining;
5284*4882a593Smuzhiyun instance->task_abort_tmo = ci->TaskAbortTO;
5285*4882a593Smuzhiyun instance->max_reset_tmo = ci->MaxResetTO;
5286*4882a593Smuzhiyun
5287*4882a593Smuzhiyun /* Check whether the controller is iMR or MR */
5288*4882a593Smuzhiyun instance->is_imr = (ci->memory_size ? 0 : 1);
5289*4882a593Smuzhiyun
5290*4882a593Smuzhiyun instance->snapdump_wait_time =
5291*4882a593Smuzhiyun (ci->properties.on_off_properties2.enable_snap_dump ?
5292*4882a593Smuzhiyun MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
5293*4882a593Smuzhiyun
5294*4882a593Smuzhiyun instance->enable_fw_dev_list =
5295*4882a593Smuzhiyun ci->properties.on_off_properties2.enable_fw_dev_list;
5296*4882a593Smuzhiyun
5297*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
5298*4882a593Smuzhiyun "controller type\t: %s(%dMB)\n",
5299*4882a593Smuzhiyun instance->is_imr ? "iMR" : "MR",
5300*4882a593Smuzhiyun le16_to_cpu(ci->memory_size));
5301*4882a593Smuzhiyun
5302*4882a593Smuzhiyun instance->disableOnlineCtrlReset =
5303*4882a593Smuzhiyun ci->properties.OnOffProperties.disableOnlineCtrlReset;
5304*4882a593Smuzhiyun instance->secure_jbod_support =
5305*4882a593Smuzhiyun ci->adapterOperations3.supportSecurityonJBOD;
5306*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
5307*4882a593Smuzhiyun instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
5308*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
5309*4882a593Smuzhiyun instance->secure_jbod_support ? "Yes" : "No");
5310*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
5311*4882a593Smuzhiyun instance->support_nvme_passthru ? "Yes" : "No");
5312*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
5313*4882a593Smuzhiyun "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
5314*4882a593Smuzhiyun instance->task_abort_tmo, instance->max_reset_tmo);
5315*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n",
5316*4882a593Smuzhiyun instance->support_seqnum_jbod_fp ? "Yes" : "No");
5317*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n",
5318*4882a593Smuzhiyun instance->support_pci_lane_margining ? "Yes" : "No");
5319*4882a593Smuzhiyun
5320*4882a593Smuzhiyun break;
5321*4882a593Smuzhiyun
5322*4882a593Smuzhiyun case DCMD_TIMEOUT:
5323*4882a593Smuzhiyun switch (dcmd_timeout_ocr_possible(instance)) {
5324*4882a593Smuzhiyun case INITIATE_OCR:
5325*4882a593Smuzhiyun cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5326*4882a593Smuzhiyun mutex_unlock(&instance->reset_mutex);
5327*4882a593Smuzhiyun megasas_reset_fusion(instance->host,
5328*4882a593Smuzhiyun MFI_IO_TIMEOUT_OCR);
5329*4882a593Smuzhiyun mutex_lock(&instance->reset_mutex);
5330*4882a593Smuzhiyun break;
5331*4882a593Smuzhiyun case KILL_ADAPTER:
5332*4882a593Smuzhiyun megaraid_sas_kill_hba(instance);
5333*4882a593Smuzhiyun break;
5334*4882a593Smuzhiyun case IGNORE_TIMEOUT:
5335*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5336*4882a593Smuzhiyun __func__, __LINE__);
5337*4882a593Smuzhiyun break;
5338*4882a593Smuzhiyun }
5339*4882a593Smuzhiyun break;
5340*4882a593Smuzhiyun case DCMD_FAILED:
5341*4882a593Smuzhiyun megaraid_sas_kill_hba(instance);
5342*4882a593Smuzhiyun break;
5343*4882a593Smuzhiyun
5344*4882a593Smuzhiyun }
5345*4882a593Smuzhiyun
5346*4882a593Smuzhiyun if (ret != DCMD_TIMEOUT)
5347*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
5348*4882a593Smuzhiyun
5349*4882a593Smuzhiyun return ret;
5350*4882a593Smuzhiyun }
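/*
 * Illustrative sketch (not part of the driver build): what the in-place
 * le32_to_cpus()-style conversions above accomplish - decoding a
 * little-endian firmware value into host byte order. This portable form
 * assembles the value from bytes instead of converting in place.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t demo_le32_to_cpu(const uint8_t b[4])
{
	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	const uint8_t wire[4] = { 0x78, 0x56, 0x34, 0x12 };	/* LE encoding of 0x12345678 */

	printf("decoded: 0x%08x\n", demo_le32_to_cpu(wire));
	return 0;
}
#endif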
5351*4882a593Smuzhiyun
5352*4882a593Smuzhiyun /*
5353*4882a593Smuzhiyun * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer
5354*4882a593Smuzhiyun * to firmware
5355*4882a593Smuzhiyun *
5356*4882a593Smuzhiyun * @instance: Adapter soft state
5357*4882a593Smuzhiyun * @crash_buf_state: tell FW to turn the crash dump feature ON/OFF
5358*4882a593Smuzhiyun *                   MR_CRASH_BUF_TURN_OFF = 0
5359*4882a593Smuzhiyun *                   MR_CRASH_BUF_TURN_ON = 1
5360*4882a593Smuzhiyun * @return: 0 on success, non-zero on failure.
5361*4882a593Smuzhiyun * Issues an internal command (DCMD) to set parameters for crash dump feature.
5362*4882a593Smuzhiyun * Driver will send address of crash dump DMA buffer and set mbox to tell FW
5363*4882a593Smuzhiyun * that driver supports crash dump feature. This DCMD will be sent only if
5364*4882a593Smuzhiyun * crash dump feature is supported by the FW.
5365*4882a593Smuzhiyun *
5366*4882a593Smuzhiyun */
5367*4882a593Smuzhiyun int megasas_set_crash_dump_params(struct megasas_instance *instance,
5368*4882a593Smuzhiyun u8 crash_buf_state)
5369*4882a593Smuzhiyun {
5370*4882a593Smuzhiyun int ret = 0;
5371*4882a593Smuzhiyun struct megasas_cmd *cmd;
5372*4882a593Smuzhiyun struct megasas_dcmd_frame *dcmd;
5373*4882a593Smuzhiyun
5374*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
5375*4882a593Smuzhiyun
5376*4882a593Smuzhiyun if (!cmd) {
5377*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
5378*4882a593Smuzhiyun return -ENOMEM;
5379*4882a593Smuzhiyun }
5380*4882a593Smuzhiyun
5381*4882a593Smuzhiyun
5382*4882a593Smuzhiyun dcmd = &cmd->frame->dcmd;
5383*4882a593Smuzhiyun
5384*4882a593Smuzhiyun memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5385*4882a593Smuzhiyun dcmd->mbox.b[0] = crash_buf_state;
5386*4882a593Smuzhiyun dcmd->cmd = MFI_CMD_DCMD;
5387*4882a593Smuzhiyun dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5388*4882a593Smuzhiyun dcmd->sge_count = 1;
5389*4882a593Smuzhiyun dcmd->flags = MFI_FRAME_DIR_NONE;
5390*4882a593Smuzhiyun dcmd->timeout = 0;
5391*4882a593Smuzhiyun dcmd->pad_0 = 0;
5392*4882a593Smuzhiyun dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
5393*4882a593Smuzhiyun dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
5394*4882a593Smuzhiyun
5395*4882a593Smuzhiyun megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
5396*4882a593Smuzhiyun CRASH_DMA_BUF_SIZE);
5397*4882a593Smuzhiyun
5398*4882a593Smuzhiyun if ((instance->adapter_type != MFI_SERIES) &&
5399*4882a593Smuzhiyun !instance->mask_interrupts)
5400*4882a593Smuzhiyun ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5401*4882a593Smuzhiyun else
5402*4882a593Smuzhiyun ret = megasas_issue_polled(instance, cmd);
5403*4882a593Smuzhiyun
5404*4882a593Smuzhiyun if (ret == DCMD_TIMEOUT) {
5405*4882a593Smuzhiyun switch (dcmd_timeout_ocr_possible(instance)) {
5406*4882a593Smuzhiyun case INITIATE_OCR:
5407*4882a593Smuzhiyun cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5408*4882a593Smuzhiyun megasas_reset_fusion(instance->host,
5409*4882a593Smuzhiyun MFI_IO_TIMEOUT_OCR);
5410*4882a593Smuzhiyun break;
5411*4882a593Smuzhiyun case KILL_ADAPTER:
5412*4882a593Smuzhiyun megaraid_sas_kill_hba(instance);
5413*4882a593Smuzhiyun break;
5414*4882a593Smuzhiyun case IGNORE_TIMEOUT:
5415*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5416*4882a593Smuzhiyun __func__, __LINE__);
5417*4882a593Smuzhiyun break;
5418*4882a593Smuzhiyun }
5419*4882a593Smuzhiyun } else
5420*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
5421*4882a593Smuzhiyun
5422*4882a593Smuzhiyun return ret;
5423*4882a593Smuzhiyun }
5424*4882a593Smuzhiyun
5425*4882a593Smuzhiyun /**
5426*4882a593Smuzhiyun * megasas_issue_init_mfi - Initializes the FW
5427*4882a593Smuzhiyun * @instance: Adapter soft state
5428*4882a593Smuzhiyun *
5429*4882a593Smuzhiyun * Issues the INIT MFI cmd
5430*4882a593Smuzhiyun */
5431*4882a593Smuzhiyun static int
5432*4882a593Smuzhiyun megasas_issue_init_mfi(struct megasas_instance *instance)
5433*4882a593Smuzhiyun {
5434*4882a593Smuzhiyun __le32 context;
5435*4882a593Smuzhiyun struct megasas_cmd *cmd;
5436*4882a593Smuzhiyun struct megasas_init_frame *init_frame;
5437*4882a593Smuzhiyun struct megasas_init_queue_info *initq_info;
5438*4882a593Smuzhiyun dma_addr_t init_frame_h;
5439*4882a593Smuzhiyun dma_addr_t initq_info_h;
5440*4882a593Smuzhiyun
5441*4882a593Smuzhiyun /*
5442*4882a593Smuzhiyun * Prepare an init frame. Note the init frame points to the queue info
5443*4882a593Smuzhiyun * structure. Each frame has SGL allocated after the first 64 bytes. For
5444*4882a593Smuzhiyun * this frame - since we don't need any SGL - we use SGL's space as the
5445*4882a593Smuzhiyun * queue info structure.
5446*4882a593Smuzhiyun *
5447*4882a593Smuzhiyun * We will not get a NULL command below. We just created the pool.
5448*4882a593Smuzhiyun */
5449*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
5450*4882a593Smuzhiyun
5451*4882a593Smuzhiyun init_frame = (struct megasas_init_frame *)cmd->frame;
5452*4882a593Smuzhiyun initq_info = (struct megasas_init_queue_info *)
5453*4882a593Smuzhiyun ((unsigned long)init_frame + 64);
5454*4882a593Smuzhiyun
5455*4882a593Smuzhiyun init_frame_h = cmd->frame_phys_addr;
5456*4882a593Smuzhiyun initq_info_h = init_frame_h + 64;
5457*4882a593Smuzhiyun
5458*4882a593Smuzhiyun context = init_frame->context;
5459*4882a593Smuzhiyun memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
5460*4882a593Smuzhiyun memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
5461*4882a593Smuzhiyun init_frame->context = context;
5462*4882a593Smuzhiyun
5463*4882a593Smuzhiyun initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
5464*4882a593Smuzhiyun initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
5465*4882a593Smuzhiyun
5466*4882a593Smuzhiyun initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
5467*4882a593Smuzhiyun initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
5468*4882a593Smuzhiyun
5469*4882a593Smuzhiyun init_frame->cmd = MFI_CMD_INIT;
5470*4882a593Smuzhiyun init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
5471*4882a593Smuzhiyun init_frame->queue_info_new_phys_addr_lo =
5472*4882a593Smuzhiyun cpu_to_le32(lower_32_bits(initq_info_h));
5473*4882a593Smuzhiyun init_frame->queue_info_new_phys_addr_hi =
5474*4882a593Smuzhiyun cpu_to_le32(upper_32_bits(initq_info_h));
5475*4882a593Smuzhiyun
5476*4882a593Smuzhiyun init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
5477*4882a593Smuzhiyun
5478*4882a593Smuzhiyun /*
5479*4882a593Smuzhiyun * disable the intr before firing the init frame to FW
5480*4882a593Smuzhiyun */
5481*4882a593Smuzhiyun instance->instancet->disable_intr(instance);
5482*4882a593Smuzhiyun
5483*4882a593Smuzhiyun /*
5484*4882a593Smuzhiyun * Issue the init frame in polled mode
5485*4882a593Smuzhiyun */
5486*4882a593Smuzhiyun
5487*4882a593Smuzhiyun if (megasas_issue_polled(instance, cmd)) {
5488*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "Failed to init firmware\n");
5489*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
5490*4882a593Smuzhiyun goto fail_fw_init;
5491*4882a593Smuzhiyun }
5492*4882a593Smuzhiyun
5493*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
5494*4882a593Smuzhiyun
5495*4882a593Smuzhiyun return 0;
5496*4882a593Smuzhiyun
5497*4882a593Smuzhiyun fail_fw_init:
5498*4882a593Smuzhiyun return -EINVAL;
5499*4882a593Smuzhiyun }
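/*
 * Illustrative sketch (not part of the driver build): the frame layout
 * exploited above - the first 64 bytes hold the init frame and the SGL
 * area that follows is reused for the queue-info structure, so its DMA
 * address is simply the frame address plus 64.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t frame_phys = 0x1000;			/* example DMA address only */
	uint64_t initq_info_phys = frame_phys + 64;	/* SGL space reused for queue info */

	printf("init frame @ 0x%llx, queue info @ 0x%llx\n",
	       (unsigned long long)frame_phys,
	       (unsigned long long)initq_info_phys);
	return 0;
}
#endif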
5500*4882a593Smuzhiyun
5501*4882a593Smuzhiyun static u32
5502*4882a593Smuzhiyun megasas_init_adapter_mfi(struct megasas_instance *instance)
5503*4882a593Smuzhiyun {
5504*4882a593Smuzhiyun u32 context_sz;
5505*4882a593Smuzhiyun u32 reply_q_sz;
5506*4882a593Smuzhiyun
5507*4882a593Smuzhiyun /*
5508*4882a593Smuzhiyun * Get various operational parameters from status register
5509*4882a593Smuzhiyun */
5510*4882a593Smuzhiyun instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
5511*4882a593Smuzhiyun /*
5512*4882a593Smuzhiyun * Reduce the max supported cmds by 1. This is to ensure that the
5513*4882a593Smuzhiyun * reply_q_sz (1 more than the max cmds that the driver may send)
5514*4882a593Smuzhiyun * does not exceed the max cmds that the FW can support.
5515*4882a593Smuzhiyun */
5516*4882a593Smuzhiyun instance->max_fw_cmds = instance->max_fw_cmds-1;
5517*4882a593Smuzhiyun instance->max_mfi_cmds = instance->max_fw_cmds;
5518*4882a593Smuzhiyun instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
5519*4882a593Smuzhiyun 0x10;
5520*4882a593Smuzhiyun /*
5521*4882a593Smuzhiyun * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
5522*4882a593Smuzhiyun * are reserved for IOCTL + driver's internal DCMDs.
5523*4882a593Smuzhiyun */
5524*4882a593Smuzhiyun if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
5525*4882a593Smuzhiyun (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
5526*4882a593Smuzhiyun instance->max_scsi_cmds = (instance->max_fw_cmds -
5527*4882a593Smuzhiyun MEGASAS_SKINNY_INT_CMDS);
5528*4882a593Smuzhiyun sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
5529*4882a593Smuzhiyun } else {
5530*4882a593Smuzhiyun instance->max_scsi_cmds = (instance->max_fw_cmds -
5531*4882a593Smuzhiyun MEGASAS_INT_CMDS);
5532*4882a593Smuzhiyun sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
5533*4882a593Smuzhiyun }
5534*4882a593Smuzhiyun
5535*4882a593Smuzhiyun instance->cur_can_queue = instance->max_scsi_cmds;
5536*4882a593Smuzhiyun /*
5537*4882a593Smuzhiyun * Create a pool of commands
5538*4882a593Smuzhiyun */
5539*4882a593Smuzhiyun if (megasas_alloc_cmds(instance))
5540*4882a593Smuzhiyun goto fail_alloc_cmds;
5541*4882a593Smuzhiyun
5542*4882a593Smuzhiyun /*
5543*4882a593Smuzhiyun * Allocate memory for reply queue. Length of reply queue should
5544*4882a593Smuzhiyun * be _one_ more than the maximum commands handled by the firmware.
5545*4882a593Smuzhiyun *
5546*4882a593Smuzhiyun * Note: When FW completes commands, it places the corresponding context
5547*4882a593Smuzhiyun * values in this circular reply queue. This circular queue is a fairly
5548*4882a593Smuzhiyun * typical producer-consumer queue. FW is the producer (of completed
5549*4882a593Smuzhiyun * commands) and the driver is the consumer.
5550*4882a593Smuzhiyun */
5551*4882a593Smuzhiyun context_sz = sizeof(u32);
5552*4882a593Smuzhiyun reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
5553*4882a593Smuzhiyun
5554*4882a593Smuzhiyun instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
5555*4882a593Smuzhiyun reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
5556*4882a593Smuzhiyun
5557*4882a593Smuzhiyun if (!instance->reply_queue) {
5558*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
5559*4882a593Smuzhiyun goto fail_reply_queue;
5560*4882a593Smuzhiyun }
5561*4882a593Smuzhiyun
5562*4882a593Smuzhiyun if (megasas_issue_init_mfi(instance))
5563*4882a593Smuzhiyun goto fail_fw_init;
5564*4882a593Smuzhiyun
5565*4882a593Smuzhiyun if (megasas_get_ctrl_info(instance)) {
5566*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
5567*4882a593Smuzhiyun "Fail from %s %d\n", instance->unique_id,
5568*4882a593Smuzhiyun __func__, __LINE__);
5569*4882a593Smuzhiyun goto fail_fw_init;
5570*4882a593Smuzhiyun }
5571*4882a593Smuzhiyun
5572*4882a593Smuzhiyun instance->fw_support_ieee = 0;
5573*4882a593Smuzhiyun instance->fw_support_ieee =
5574*4882a593Smuzhiyun (instance->instancet->read_fw_status_reg(instance) &
5575*4882a593Smuzhiyun 0x04000000);
5576*4882a593Smuzhiyun
5577*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d\n",
5578*4882a593Smuzhiyun instance->fw_support_ieee);
5579*4882a593Smuzhiyun
5580*4882a593Smuzhiyun if (instance->fw_support_ieee)
5581*4882a593Smuzhiyun instance->flag_ieee = 1;
5582*4882a593Smuzhiyun
5583*4882a593Smuzhiyun return 0;
5584*4882a593Smuzhiyun
5585*4882a593Smuzhiyun fail_fw_init:
5586*4882a593Smuzhiyun
5587*4882a593Smuzhiyun dma_free_coherent(&instance->pdev->dev, reply_q_sz,
5588*4882a593Smuzhiyun instance->reply_queue, instance->reply_queue_h);
5589*4882a593Smuzhiyun fail_reply_queue:
5590*4882a593Smuzhiyun megasas_free_cmds(instance);
5591*4882a593Smuzhiyun
5592*4882a593Smuzhiyun fail_alloc_cmds:
5593*4882a593Smuzhiyun return 1;
5594*4882a593Smuzhiyun }
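/*
 * Illustrative sketch (not part of the driver build): the command-budget
 * arithmetic above. The firmware-reported limit is reduced by one so the
 * reply queue (one entry larger than the command count) never exceeds the
 * firmware maximum; the reserved-command count used here is an assumed
 * stand-in for MEGASAS_INT_CMDS.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int fw_reported = 1008, reserved_internal = 32;	/* assumed values */
	unsigned int max_fw_cmds = fw_reported - 1;
	unsigned int reply_q_sz = sizeof(unsigned int) * (max_fw_cmds + 1);
	unsigned int max_scsi_cmds = max_fw_cmds - reserved_internal;

	printf("max_fw_cmds=%u reply_q_sz=%u max_scsi_cmds=%u\n",
	       max_fw_cmds, reply_q_sz, max_scsi_cmds);
	return 0;	/* prints: max_fw_cmds=1007 reply_q_sz=4032 max_scsi_cmds=975 */
}
#endif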
5595*4882a593Smuzhiyun
5596*4882a593Smuzhiyun static
5597*4882a593Smuzhiyun void megasas_setup_irq_poll(struct megasas_instance *instance)
5598*4882a593Smuzhiyun {
5599*4882a593Smuzhiyun struct megasas_irq_context *irq_ctx;
5600*4882a593Smuzhiyun u32 count, i;
5601*4882a593Smuzhiyun
5602*4882a593Smuzhiyun count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5603*4882a593Smuzhiyun
5604*4882a593Smuzhiyun /* Initialize IRQ poll */
5605*4882a593Smuzhiyun for (i = 0; i < count; i++) {
5606*4882a593Smuzhiyun irq_ctx = &instance->irq_context[i];
5607*4882a593Smuzhiyun irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
5608*4882a593Smuzhiyun irq_ctx->irq_poll_scheduled = false;
5609*4882a593Smuzhiyun irq_poll_init(&irq_ctx->irqpoll,
5610*4882a593Smuzhiyun instance->threshold_reply_count,
5611*4882a593Smuzhiyun megasas_irqpoll);
5612*4882a593Smuzhiyun }
5613*4882a593Smuzhiyun }
5614*4882a593Smuzhiyun
5615*4882a593Smuzhiyun /*
5616*4882a593Smuzhiyun * megasas_setup_irqs_ioapic - register legacy interrupts.
5617*4882a593Smuzhiyun * @instance: Adapter soft state
5618*4882a593Smuzhiyun *
5619*4882a593Smuzhiyun * Do not enable interrupts, only set up ISRs.
5620*4882a593Smuzhiyun *
5621*4882a593Smuzhiyun * Return 0 on success.
5622*4882a593Smuzhiyun */
5623*4882a593Smuzhiyun static int
5624*4882a593Smuzhiyun megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5625*4882a593Smuzhiyun {
5626*4882a593Smuzhiyun struct pci_dev *pdev;
5627*4882a593Smuzhiyun
5628*4882a593Smuzhiyun pdev = instance->pdev;
5629*4882a593Smuzhiyun instance->irq_context[0].instance = instance;
5630*4882a593Smuzhiyun instance->irq_context[0].MSIxIndex = 0;
5631*4882a593Smuzhiyun snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u",
5632*4882a593Smuzhiyun "megasas", instance->host->host_no);
5633*4882a593Smuzhiyun if (request_irq(pci_irq_vector(pdev, 0),
5634*4882a593Smuzhiyun instance->instancet->service_isr, IRQF_SHARED,
5635*4882a593Smuzhiyun instance->irq_context->name, &instance->irq_context[0])) {
5636*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
5637*4882a593Smuzhiyun "Failed to register IRQ from %s %d\n",
5638*4882a593Smuzhiyun __func__, __LINE__);
5639*4882a593Smuzhiyun return -1;
5640*4882a593Smuzhiyun }
5641*4882a593Smuzhiyun instance->perf_mode = MR_LATENCY_PERF_MODE;
5642*4882a593Smuzhiyun instance->low_latency_index_start = 0;
5643*4882a593Smuzhiyun return 0;
5644*4882a593Smuzhiyun }
5645*4882a593Smuzhiyun
5646*4882a593Smuzhiyun /**
5647*4882a593Smuzhiyun * megasas_setup_irqs_msix - register MSI-x interrupts.
5648*4882a593Smuzhiyun * @instance: Adapter soft state
5649*4882a593Smuzhiyun * @is_probe: Driver probe check
5650*4882a593Smuzhiyun *
5651*4882a593Smuzhiyun * Do not enable interrupts, only set up ISRs.
5652*4882a593Smuzhiyun *
5653*4882a593Smuzhiyun * Return 0 on success.
5654*4882a593Smuzhiyun */
5655*4882a593Smuzhiyun static int
5656*4882a593Smuzhiyun megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5657*4882a593Smuzhiyun {
5658*4882a593Smuzhiyun int i, j;
5659*4882a593Smuzhiyun struct pci_dev *pdev;
5660*4882a593Smuzhiyun
5661*4882a593Smuzhiyun pdev = instance->pdev;
5662*4882a593Smuzhiyun
5663*4882a593Smuzhiyun /* Try MSI-x */
5664*4882a593Smuzhiyun for (i = 0; i < instance->msix_vectors; i++) {
5665*4882a593Smuzhiyun instance->irq_context[i].instance = instance;
5666*4882a593Smuzhiyun instance->irq_context[i].MSIxIndex = i;
5667*4882a593Smuzhiyun snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u",
5668*4882a593Smuzhiyun "megasas", instance->host->host_no, i);
5669*4882a593Smuzhiyun if (request_irq(pci_irq_vector(pdev, i),
5670*4882a593Smuzhiyun instance->instancet->service_isr, 0, instance->irq_context[i].name,
5671*4882a593Smuzhiyun &instance->irq_context[i])) {
5672*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
5673*4882a593Smuzhiyun "Failed to register IRQ for vector %d.\n", i);
5674*4882a593Smuzhiyun for (j = 0; j < i; j++) {
5675*4882a593Smuzhiyun if (j < instance->low_latency_index_start)
5676*4882a593Smuzhiyun irq_set_affinity_hint(
5677*4882a593Smuzhiyun pci_irq_vector(pdev, j), NULL);
5678*4882a593Smuzhiyun free_irq(pci_irq_vector(pdev, j),
5679*4882a593Smuzhiyun &instance->irq_context[j]);
5680*4882a593Smuzhiyun }
5681*4882a593Smuzhiyun /* Retry irq register for IO_APIC*/
5682*4882a593Smuzhiyun instance->msix_vectors = 0;
5683*4882a593Smuzhiyun instance->msix_load_balance = false;
5684*4882a593Smuzhiyun if (is_probe) {
5685*4882a593Smuzhiyun pci_free_irq_vectors(instance->pdev);
5686*4882a593Smuzhiyun return megasas_setup_irqs_ioapic(instance);
5687*4882a593Smuzhiyun } else {
5688*4882a593Smuzhiyun return -1;
5689*4882a593Smuzhiyun }
5690*4882a593Smuzhiyun }
5691*4882a593Smuzhiyun }
5692*4882a593Smuzhiyun
5693*4882a593Smuzhiyun return 0;
5694*4882a593Smuzhiyun }
5695*4882a593Smuzhiyun
5696*4882a593Smuzhiyun /*
5697*4882a593Smuzhiyun * megasas_destroy_irqs - unregister interrupts.
5698*4882a593Smuzhiyun * @instance: Adapter soft state
5699*4882a593Smuzhiyun * return: void
5700*4882a593Smuzhiyun */
5701*4882a593Smuzhiyun static void
5702*4882a593Smuzhiyun megasas_destroy_irqs(struct megasas_instance *instance) {
5703*4882a593Smuzhiyun
5704*4882a593Smuzhiyun int i;
5705*4882a593Smuzhiyun int count;
5706*4882a593Smuzhiyun struct megasas_irq_context *irq_ctx;
5707*4882a593Smuzhiyun
5708*4882a593Smuzhiyun count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5709*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES) {
5710*4882a593Smuzhiyun for (i = 0; i < count; i++) {
5711*4882a593Smuzhiyun irq_ctx = &instance->irq_context[i];
5712*4882a593Smuzhiyun irq_poll_disable(&irq_ctx->irqpoll);
5713*4882a593Smuzhiyun }
5714*4882a593Smuzhiyun }
5715*4882a593Smuzhiyun
5716*4882a593Smuzhiyun if (instance->msix_vectors)
5717*4882a593Smuzhiyun for (i = 0; i < instance->msix_vectors; i++) {
5718*4882a593Smuzhiyun if (i < instance->low_latency_index_start)
5719*4882a593Smuzhiyun irq_set_affinity_hint(
5720*4882a593Smuzhiyun pci_irq_vector(instance->pdev, i), NULL);
5721*4882a593Smuzhiyun free_irq(pci_irq_vector(instance->pdev, i),
5722*4882a593Smuzhiyun &instance->irq_context[i]);
5723*4882a593Smuzhiyun }
5724*4882a593Smuzhiyun else
5725*4882a593Smuzhiyun free_irq(pci_irq_vector(instance->pdev, 0),
5726*4882a593Smuzhiyun &instance->irq_context[0]);
5727*4882a593Smuzhiyun }
5728*4882a593Smuzhiyun
5729*4882a593Smuzhiyun /**
5730*4882a593Smuzhiyun * megasas_setup_jbod_map - setup jbod map for FP seq_number.
5731*4882a593Smuzhiyun * @instance: Adapter soft state
5732*4882a593Smuzhiyun *
5733*4882a593Smuzhiyun * Return 0 on success.
5734*4882a593Smuzhiyun */
5735*4882a593Smuzhiyun void
5736*4882a593Smuzhiyun megasas_setup_jbod_map(struct megasas_instance *instance)
5737*4882a593Smuzhiyun {
5738*4882a593Smuzhiyun int i;
5739*4882a593Smuzhiyun struct fusion_context *fusion = instance->ctrl_context;
5740*4882a593Smuzhiyun u32 pd_seq_map_sz;
5741*4882a593Smuzhiyun
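/*
 * The map size below is the sync header plus one MR_PD_CFG_SEQ entry per
 * physical device; the "- 1" accounts for the entry already embedded in
 * struct MR_PD_CFG_SEQ_NUM_SYNC.
 */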
5742*4882a593Smuzhiyun pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5743*4882a593Smuzhiyun (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
5744*4882a593Smuzhiyun
5745*4882a593Smuzhiyun instance->use_seqnum_jbod_fp =
5746*4882a593Smuzhiyun instance->support_seqnum_jbod_fp;
5747*4882a593Smuzhiyun if (reset_devices || !fusion ||
5748*4882a593Smuzhiyun !instance->support_seqnum_jbod_fp) {
5749*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
5750*4882a593Smuzhiyun "JBOD sequence map is disabled %s %d\n",
5751*4882a593Smuzhiyun __func__, __LINE__);
5752*4882a593Smuzhiyun instance->use_seqnum_jbod_fp = false;
5753*4882a593Smuzhiyun return;
5754*4882a593Smuzhiyun }
5755*4882a593Smuzhiyun
5756*4882a593Smuzhiyun if (fusion->pd_seq_sync[0])
5757*4882a593Smuzhiyun goto skip_alloc;
5758*4882a593Smuzhiyun
5759*4882a593Smuzhiyun for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5760*4882a593Smuzhiyun fusion->pd_seq_sync[i] = dma_alloc_coherent
5761*4882a593Smuzhiyun (&instance->pdev->dev, pd_seq_map_sz,
5762*4882a593Smuzhiyun &fusion->pd_seq_phys[i], GFP_KERNEL);
5763*4882a593Smuzhiyun if (!fusion->pd_seq_sync[i]) {
5764*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
5765*4882a593Smuzhiyun "Failed to allocate memory from %s %d\n",
5766*4882a593Smuzhiyun __func__, __LINE__);
5767*4882a593Smuzhiyun if (i == 1) {
5768*4882a593Smuzhiyun dma_free_coherent(&instance->pdev->dev,
5769*4882a593Smuzhiyun pd_seq_map_sz, fusion->pd_seq_sync[0],
5770*4882a593Smuzhiyun fusion->pd_seq_phys[0]);
5771*4882a593Smuzhiyun fusion->pd_seq_sync[0] = NULL;
5772*4882a593Smuzhiyun }
5773*4882a593Smuzhiyun instance->use_seqnum_jbod_fp = false;
5774*4882a593Smuzhiyun return;
5775*4882a593Smuzhiyun }
5776*4882a593Smuzhiyun }
5777*4882a593Smuzhiyun
5778*4882a593Smuzhiyun skip_alloc:
5779*4882a593Smuzhiyun if (!megasas_sync_pd_seq_num(instance, false) &&
5780*4882a593Smuzhiyun !megasas_sync_pd_seq_num(instance, true))
5781*4882a593Smuzhiyun instance->use_seqnum_jbod_fp = true;
5782*4882a593Smuzhiyun else
5783*4882a593Smuzhiyun instance->use_seqnum_jbod_fp = false;
5784*4882a593Smuzhiyun }
5785*4882a593Smuzhiyun
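/*
 * megasas_setup_reply_map - build the CPU to reply-queue mapping.
 * @instance: Adapter soft state
 *
 * Each CPU is mapped to the reply queue whose MSI-X affinity mask contains
 * it. If an affinity mask is unavailable, fall back to a simple round-robin
 * assignment over the low latency queues.
 */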
5786*4882a593Smuzhiyun static void megasas_setup_reply_map(struct megasas_instance *instance)
5787*4882a593Smuzhiyun {
5788*4882a593Smuzhiyun const struct cpumask *mask;
5789*4882a593Smuzhiyun unsigned int queue, cpu, low_latency_index_start;
5790*4882a593Smuzhiyun
5791*4882a593Smuzhiyun low_latency_index_start = instance->low_latency_index_start;
5792*4882a593Smuzhiyun
5793*4882a593Smuzhiyun for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) {
5794*4882a593Smuzhiyun mask = pci_irq_get_affinity(instance->pdev, queue);
5795*4882a593Smuzhiyun if (!mask)
5796*4882a593Smuzhiyun goto fallback;
5797*4882a593Smuzhiyun
5798*4882a593Smuzhiyun for_each_cpu(cpu, mask)
5799*4882a593Smuzhiyun instance->reply_map[cpu] = queue;
5800*4882a593Smuzhiyun }
5801*4882a593Smuzhiyun return;
5802*4882a593Smuzhiyun
5803*4882a593Smuzhiyun fallback:
5804*4882a593Smuzhiyun queue = low_latency_index_start;
5805*4882a593Smuzhiyun for_each_possible_cpu(cpu) {
5806*4882a593Smuzhiyun instance->reply_map[cpu] = queue;
5807*4882a593Smuzhiyun if (queue == (instance->msix_vectors - 1))
5808*4882a593Smuzhiyun queue = low_latency_index_start;
5809*4882a593Smuzhiyun else
5810*4882a593Smuzhiyun queue++;
5811*4882a593Smuzhiyun }
5812*4882a593Smuzhiyun }
5813*4882a593Smuzhiyun
5814*4882a593Smuzhiyun /**
5815*4882a593Smuzhiyun * megasas_get_device_list - Get the PD and LD device list from FW.
5816*4882a593Smuzhiyun * @instance: Adapter soft state
5817*4882a593Smuzhiyun * @return: Success or failure
5818*4882a593Smuzhiyun *
5819*4882a593Smuzhiyun * Issue DCMDs to Firmware to get the PD and LD list.
5820*4882a593Smuzhiyun * Based on the FW support, the driver sends the HOST_DEVICE_LIST or a combination
5821*4882a593Smuzhiyun * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
5822*4882a593Smuzhiyun */
5823*4882a593Smuzhiyun static
5824*4882a593Smuzhiyun int megasas_get_device_list(struct megasas_instance *instance)
5825*4882a593Smuzhiyun {
5826*4882a593Smuzhiyun memset(instance->pd_list, 0,
5827*4882a593Smuzhiyun (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5828*4882a593Smuzhiyun memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5829*4882a593Smuzhiyun
5830*4882a593Smuzhiyun if (instance->enable_fw_dev_list) {
5831*4882a593Smuzhiyun if (megasas_host_device_list_query(instance, true))
5832*4882a593Smuzhiyun return FAILED;
5833*4882a593Smuzhiyun } else {
5834*4882a593Smuzhiyun if (megasas_get_pd_list(instance) < 0) {
5835*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "failed to get PD list\n");
5836*4882a593Smuzhiyun return FAILED;
5837*4882a593Smuzhiyun }
5838*4882a593Smuzhiyun
5839*4882a593Smuzhiyun if (megasas_ld_list_query(instance,
5840*4882a593Smuzhiyun MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
5841*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "failed to get LD list\n");
5842*4882a593Smuzhiyun return FAILED;
5843*4882a593Smuzhiyun }
5844*4882a593Smuzhiyun }
5845*4882a593Smuzhiyun
5846*4882a593Smuzhiyun return SUCCESS;
5847*4882a593Smuzhiyun }
5848*4882a593Smuzhiyun
5849*4882a593Smuzhiyun /**
5850*4882a593Smuzhiyun * megasas_set_high_iops_queue_affinity_hint - Set affinity hint for high IOPS queues
5851*4882a593Smuzhiyun * @instance: Adapter soft state
5852*4882a593Smuzhiyun * return: void
5853*4882a593Smuzhiyun */
5854*4882a593Smuzhiyun static inline void
5855*4882a593Smuzhiyun megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
5856*4882a593Smuzhiyun {
5857*4882a593Smuzhiyun int i;
5858*4882a593Smuzhiyun int local_numa_node;
5859*4882a593Smuzhiyun
5860*4882a593Smuzhiyun if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
5861*4882a593Smuzhiyun local_numa_node = dev_to_node(&instance->pdev->dev);
5862*4882a593Smuzhiyun
5863*4882a593Smuzhiyun for (i = 0; i < instance->low_latency_index_start; i++)
5864*4882a593Smuzhiyun irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
5865*4882a593Smuzhiyun cpumask_of_node(local_numa_node));
5866*4882a593Smuzhiyun }
5867*4882a593Smuzhiyun }
5868*4882a593Smuzhiyun
5869*4882a593Smuzhiyun static int
5870*4882a593Smuzhiyun __megasas_alloc_irq_vectors(struct megasas_instance *instance)
5871*4882a593Smuzhiyun {
5872*4882a593Smuzhiyun int i, irq_flags;
5873*4882a593Smuzhiyun struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
5874*4882a593Smuzhiyun struct irq_affinity *descp = &desc;
5875*4882a593Smuzhiyun
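/*
 * The .pre_vectors value above excludes the first low_latency_index_start
 * vectors (the high IOPS queues) from automatic affinity spreading; only
 * the remaining low latency vectors get per-CPU affinity from the PCI core.
 */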
5876*4882a593Smuzhiyun irq_flags = PCI_IRQ_MSIX;
5877*4882a593Smuzhiyun
5878*4882a593Smuzhiyun if (instance->smp_affinity_enable)
5879*4882a593Smuzhiyun irq_flags |= PCI_IRQ_AFFINITY;
5880*4882a593Smuzhiyun else
5881*4882a593Smuzhiyun descp = NULL;
5882*4882a593Smuzhiyun
5883*4882a593Smuzhiyun i = pci_alloc_irq_vectors_affinity(instance->pdev,
5884*4882a593Smuzhiyun instance->low_latency_index_start,
5885*4882a593Smuzhiyun instance->msix_vectors, irq_flags, descp);
5886*4882a593Smuzhiyun
5887*4882a593Smuzhiyun return i;
5888*4882a593Smuzhiyun }
5889*4882a593Smuzhiyun
5890*4882a593Smuzhiyun /**
5891*4882a593Smuzhiyun * megasas_alloc_irq_vectors - Allocate IRQ vectors/enable MSI-x vectors
5892*4882a593Smuzhiyun * @instance: Adapter soft state
5893*4882a593Smuzhiyun * return: void
5894*4882a593Smuzhiyun */
5895*4882a593Smuzhiyun static void
5896*4882a593Smuzhiyun megasas_alloc_irq_vectors(struct megasas_instance *instance)
5897*4882a593Smuzhiyun {
5898*4882a593Smuzhiyun int i;
5899*4882a593Smuzhiyun unsigned int num_msix_req;
5900*4882a593Smuzhiyun
5901*4882a593Smuzhiyun i = __megasas_alloc_irq_vectors(instance);
5902*4882a593Smuzhiyun
5903*4882a593Smuzhiyun if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
5904*4882a593Smuzhiyun (i != instance->msix_vectors)) {
5905*4882a593Smuzhiyun if (instance->msix_vectors)
5906*4882a593Smuzhiyun pci_free_irq_vectors(instance->pdev);
5907*4882a593Smuzhiyun /* Disable Balanced IOPS mode and try realloc vectors */
5908*4882a593Smuzhiyun instance->perf_mode = MR_LATENCY_PERF_MODE;
5909*4882a593Smuzhiyun instance->low_latency_index_start = 1;
5910*4882a593Smuzhiyun num_msix_req = num_online_cpus() + instance->low_latency_index_start;
5911*4882a593Smuzhiyun
5912*4882a593Smuzhiyun instance->msix_vectors = min(num_msix_req,
5913*4882a593Smuzhiyun instance->msix_vectors);
5914*4882a593Smuzhiyun
5915*4882a593Smuzhiyun i = __megasas_alloc_irq_vectors(instance);
5916*4882a593Smuzhiyun
5917*4882a593Smuzhiyun }
5918*4882a593Smuzhiyun
5919*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
5920*4882a593Smuzhiyun "requested/available msix %d/%d\n", instance->msix_vectors, i);
5921*4882a593Smuzhiyun
5922*4882a593Smuzhiyun if (i > 0)
5923*4882a593Smuzhiyun instance->msix_vectors = i;
5924*4882a593Smuzhiyun else
5925*4882a593Smuzhiyun instance->msix_vectors = 0;
5926*4882a593Smuzhiyun
5927*4882a593Smuzhiyun if (instance->smp_affinity_enable)
5928*4882a593Smuzhiyun megasas_set_high_iops_queue_affinity_hint(instance);
5929*4882a593Smuzhiyun }
5930*4882a593Smuzhiyun
5931*4882a593Smuzhiyun /**
5932*4882a593Smuzhiyun * megasas_init_fw - Initializes the FW
5933*4882a593Smuzhiyun * @instance: Adapter soft state
5934*4882a593Smuzhiyun *
5935*4882a593Smuzhiyun * This is the main function for initializing firmware
5936*4882a593Smuzhiyun */
5937*4882a593Smuzhiyun
5938*4882a593Smuzhiyun static int megasas_init_fw(struct megasas_instance *instance)
5939*4882a593Smuzhiyun {
5940*4882a593Smuzhiyun u32 max_sectors_1;
5941*4882a593Smuzhiyun u32 max_sectors_2, tmp_sectors, msix_enable;
5942*4882a593Smuzhiyun u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
5943*4882a593Smuzhiyun resource_size_t base_addr;
5944*4882a593Smuzhiyun void *base_addr_phys;
5945*4882a593Smuzhiyun struct megasas_ctrl_info *ctrl_info = NULL;
5946*4882a593Smuzhiyun unsigned long bar_list;
5947*4882a593Smuzhiyun int i, j, loop;
5948*4882a593Smuzhiyun struct IOV_111 *iovPtr;
5949*4882a593Smuzhiyun struct fusion_context *fusion;
5950*4882a593Smuzhiyun bool intr_coalescing;
5951*4882a593Smuzhiyun unsigned int num_msix_req;
5952*4882a593Smuzhiyun u16 lnksta, speed;
5953*4882a593Smuzhiyun
5954*4882a593Smuzhiyun fusion = instance->ctrl_context;
5955*4882a593Smuzhiyun
5956*4882a593Smuzhiyun /* Find first memory bar */
5957*4882a593Smuzhiyun bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5958*4882a593Smuzhiyun instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5959*4882a593Smuzhiyun if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5960*4882a593Smuzhiyun "megasas: LSI")) {
5961*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5962*4882a593Smuzhiyun return -EBUSY;
5963*4882a593Smuzhiyun }
5964*4882a593Smuzhiyun
5965*4882a593Smuzhiyun base_addr = pci_resource_start(instance->pdev, instance->bar);
5966*4882a593Smuzhiyun instance->reg_set = ioremap(base_addr, 8192);
5967*4882a593Smuzhiyun
5968*4882a593Smuzhiyun if (!instance->reg_set) {
5969*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5970*4882a593Smuzhiyun goto fail_ioremap;
5971*4882a593Smuzhiyun }
5972*4882a593Smuzhiyun
5973*4882a593Smuzhiyun base_addr_phys = &base_addr;
5974*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev,
5975*4882a593Smuzhiyun "BAR:0x%lx BAR's base_addr(phys):%pa mapped virt_addr:0x%p\n",
5976*4882a593Smuzhiyun instance->bar, base_addr_phys, instance->reg_set);
5977*4882a593Smuzhiyun
5978*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES)
5979*4882a593Smuzhiyun instance->instancet = &megasas_instance_template_fusion;
5980*4882a593Smuzhiyun else {
5981*4882a593Smuzhiyun switch (instance->pdev->device) {
5982*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_SAS1078R:
5983*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_SAS1078DE:
5984*4882a593Smuzhiyun instance->instancet = &megasas_instance_template_ppc;
5985*4882a593Smuzhiyun break;
5986*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5987*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5988*4882a593Smuzhiyun instance->instancet = &megasas_instance_template_gen2;
5989*4882a593Smuzhiyun break;
5990*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5991*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5992*4882a593Smuzhiyun instance->instancet = &megasas_instance_template_skinny;
5993*4882a593Smuzhiyun break;
5994*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_SAS1064R:
5995*4882a593Smuzhiyun case PCI_DEVICE_ID_DELL_PERC5:
5996*4882a593Smuzhiyun default:
5997*4882a593Smuzhiyun instance->instancet = &megasas_instance_template_xscale;
5998*4882a593Smuzhiyun instance->pd_list_not_supported = 1;
5999*4882a593Smuzhiyun break;
6000*4882a593Smuzhiyun }
6001*4882a593Smuzhiyun }
6002*4882a593Smuzhiyun
6003*4882a593Smuzhiyun if (megasas_transition_to_ready(instance, 0)) {
6004*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
6005*4882a593Smuzhiyun "Failed to transition controller to ready from %s!\n",
6006*4882a593Smuzhiyun __func__);
6007*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES) {
6008*4882a593Smuzhiyun status_reg = instance->instancet->read_fw_status_reg(
6009*4882a593Smuzhiyun instance);
6010*4882a593Smuzhiyun if (status_reg & MFI_RESET_ADAPTER) {
6011*4882a593Smuzhiyun if (megasas_adp_reset_wait_for_ready
6012*4882a593Smuzhiyun (instance, true, 0) == FAILED)
6013*4882a593Smuzhiyun goto fail_ready_state;
6014*4882a593Smuzhiyun } else {
6015*4882a593Smuzhiyun goto fail_ready_state;
6016*4882a593Smuzhiyun }
6017*4882a593Smuzhiyun } else {
6018*4882a593Smuzhiyun atomic_set(&instance->fw_reset_no_pci_access, 1);
6019*4882a593Smuzhiyun instance->instancet->adp_reset
6020*4882a593Smuzhiyun (instance, instance->reg_set);
6021*4882a593Smuzhiyun atomic_set(&instance->fw_reset_no_pci_access, 0);
6022*4882a593Smuzhiyun
6023*4882a593Smuzhiyun /* wait for about 30 seconds before retrying */
6024*4882a593Smuzhiyun ssleep(30);
6025*4882a593Smuzhiyun
6026*4882a593Smuzhiyun if (megasas_transition_to_ready(instance, 0))
6027*4882a593Smuzhiyun goto fail_ready_state;
6028*4882a593Smuzhiyun }
6029*4882a593Smuzhiyun
6030*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
6031*4882a593Smuzhiyun "FW restarted successfully from %s!\n",
6032*4882a593Smuzhiyun __func__);
6033*4882a593Smuzhiyun }
6034*4882a593Smuzhiyun
6035*4882a593Smuzhiyun megasas_init_ctrl_params(instance);
6036*4882a593Smuzhiyun
6037*4882a593Smuzhiyun if (megasas_set_dma_mask(instance))
6038*4882a593Smuzhiyun goto fail_ready_state;
6039*4882a593Smuzhiyun
6040*4882a593Smuzhiyun if (megasas_alloc_ctrl_mem(instance))
6041*4882a593Smuzhiyun goto fail_alloc_dma_buf;
6042*4882a593Smuzhiyun
6043*4882a593Smuzhiyun if (megasas_alloc_ctrl_dma_buffers(instance))
6044*4882a593Smuzhiyun goto fail_alloc_dma_buf;
6045*4882a593Smuzhiyun
6046*4882a593Smuzhiyun fusion = instance->ctrl_context;
6047*4882a593Smuzhiyun
6048*4882a593Smuzhiyun if (instance->adapter_type >= VENTURA_SERIES) {
6049*4882a593Smuzhiyun scratch_pad_2 =
6050*4882a593Smuzhiyun megasas_readl(instance,
6051*4882a593Smuzhiyun &instance->reg_set->outbound_scratch_pad_2);
6052*4882a593Smuzhiyun instance->max_raid_mapsize = ((scratch_pad_2 >>
6053*4882a593Smuzhiyun MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
6054*4882a593Smuzhiyun MR_MAX_RAID_MAP_SIZE_MASK);
6055*4882a593Smuzhiyun }
6056*4882a593Smuzhiyun
6057*4882a593Smuzhiyun instance->enable_sdev_max_qd = enable_sdev_max_qd;
6058*4882a593Smuzhiyun
6059*4882a593Smuzhiyun switch (instance->adapter_type) {
6060*4882a593Smuzhiyun case VENTURA_SERIES:
6061*4882a593Smuzhiyun fusion->pcie_bw_limitation = true;
6062*4882a593Smuzhiyun break;
6063*4882a593Smuzhiyun case AERO_SERIES:
6064*4882a593Smuzhiyun fusion->r56_div_offload = true;
6065*4882a593Smuzhiyun break;
6066*4882a593Smuzhiyun default:
6067*4882a593Smuzhiyun break;
6068*4882a593Smuzhiyun }
6069*4882a593Smuzhiyun
6070*4882a593Smuzhiyun /* Check if MSI-X is supported while in ready state */
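/* (bit 26 of the FW status register, extracted by the mask/shift below) */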
6071*4882a593Smuzhiyun msix_enable = (instance->instancet->read_fw_status_reg(instance) &
6072*4882a593Smuzhiyun 0x4000000) >> 0x1a;
6073*4882a593Smuzhiyun if (msix_enable && !msix_disable) {
6074*4882a593Smuzhiyun
6075*4882a593Smuzhiyun scratch_pad_1 = megasas_readl
6076*4882a593Smuzhiyun (instance, &instance->reg_set->outbound_scratch_pad_1);
6077*4882a593Smuzhiyun /* Check max MSI-X vectors */
6078*4882a593Smuzhiyun if (fusion) {
6079*4882a593Smuzhiyun if (instance->adapter_type == THUNDERBOLT_SERIES) {
6080*4882a593Smuzhiyun /* Thunderbolt Series*/
6081*4882a593Smuzhiyun instance->msix_vectors = (scratch_pad_1
6082*4882a593Smuzhiyun & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
6083*4882a593Smuzhiyun } else {
6084*4882a593Smuzhiyun instance->msix_vectors = ((scratch_pad_1
6085*4882a593Smuzhiyun & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
6086*4882a593Smuzhiyun >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
6087*4882a593Smuzhiyun
6088*4882a593Smuzhiyun /*
6089*4882a593Smuzhiyun * For Invader series, > 8 MSI-x vectors
6090*4882a593Smuzhiyun * supported by FW/HW implies combined
6091*4882a593Smuzhiyun * reply queue mode is enabled.
6092*4882a593Smuzhiyun * For Ventura series, > 16 MSI-x vectors
6093*4882a593Smuzhiyun * supported by FW/HW implies combined
6094*4882a593Smuzhiyun * reply queue mode is enabled.
6095*4882a593Smuzhiyun */
6096*4882a593Smuzhiyun switch (instance->adapter_type) {
6097*4882a593Smuzhiyun case INVADER_SERIES:
6098*4882a593Smuzhiyun if (instance->msix_vectors > 8)
6099*4882a593Smuzhiyun instance->msix_combined = true;
6100*4882a593Smuzhiyun break;
6101*4882a593Smuzhiyun case AERO_SERIES:
6102*4882a593Smuzhiyun case VENTURA_SERIES:
6103*4882a593Smuzhiyun if (instance->msix_vectors > 16)
6104*4882a593Smuzhiyun instance->msix_combined = true;
6105*4882a593Smuzhiyun break;
6106*4882a593Smuzhiyun }
6107*4882a593Smuzhiyun
6108*4882a593Smuzhiyun if (rdpq_enable)
6109*4882a593Smuzhiyun instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
6110*4882a593Smuzhiyun 1 : 0;
6111*4882a593Smuzhiyun
6112*4882a593Smuzhiyun if (instance->adapter_type >= INVADER_SERIES &&
6113*4882a593Smuzhiyun !instance->msix_combined) {
6114*4882a593Smuzhiyun instance->msix_load_balance = true;
6115*4882a593Smuzhiyun instance->smp_affinity_enable = false;
6116*4882a593Smuzhiyun }
6117*4882a593Smuzhiyun
6118*4882a593Smuzhiyun /* Save reply post host index addresses 1-15 to local memory.
6119*4882a593Smuzhiyun * Index 0 is already saved from reg offset
6120*4882a593Smuzhiyun * MPI2_REPLY_POST_HOST_INDEX_OFFSET
6121*4882a593Smuzhiyun */
6122*4882a593Smuzhiyun for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
6123*4882a593Smuzhiyun instance->reply_post_host_index_addr[loop] =
6124*4882a593Smuzhiyun (u32 __iomem *)
6125*4882a593Smuzhiyun ((u8 __iomem *)instance->reg_set +
6126*4882a593Smuzhiyun MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
6127*4882a593Smuzhiyun + (loop * 0x10));
6128*4882a593Smuzhiyun }
6129*4882a593Smuzhiyun }
6130*4882a593Smuzhiyun
6131*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
6132*4882a593Smuzhiyun "firmware supports msix\t: (%d)",
6133*4882a593Smuzhiyun instance->msix_vectors);
6134*4882a593Smuzhiyun if (msix_vectors)
6135*4882a593Smuzhiyun instance->msix_vectors = min(msix_vectors,
6136*4882a593Smuzhiyun instance->msix_vectors);
6137*4882a593Smuzhiyun } else /* MFI adapters */
6138*4882a593Smuzhiyun instance->msix_vectors = 1;
6139*4882a593Smuzhiyun
6140*4882a593Smuzhiyun
6141*4882a593Smuzhiyun /*
6142*4882a593Smuzhiyun * For Aero (if some conditions are met), the driver will configure a
6143*4882a593Smuzhiyun * few additional reply queues with interrupt coalescing enabled.
6144*4882a593Smuzhiyun * These queues with interrupt coalescing enabled are called
6145*4882a593Smuzhiyun * High IOPS queues, and the rest of the reply queues (based on the
6146*4882a593Smuzhiyun * number of logical CPUs) are termed Low latency queues.
6147*4882a593Smuzhiyun *
6148*4882a593Smuzhiyun * Total number of reply queues = High IOPS queues + Low latency queues
6149*4882a593Smuzhiyun *
6150*4882a593Smuzhiyun * For the rest of the Fusion adapters, 1 additional reply queue will be
6151*4882a593Smuzhiyun * reserved for management commands, and the rest of the reply queues
6152*4882a593Smuzhiyun * (based on the number of logical CPUs) will be used for IOs and
6153*4882a593Smuzhiyun * referred to as IO queues.
6154*4882a593Smuzhiyun * Total number of reply queues = 1 + IO queues
6155*4882a593Smuzhiyun *
6156*4882a593Smuzhiyun * MFI adapters support a single MSI-X vector, so a single reply queue
6157*4882a593Smuzhiyun * will be used for both IO and management commands.
6158*4882a593Smuzhiyun */
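/*
 * Illustrative example (numbers are assumptions, not read from FW): with
 * 8 high IOPS queues and 24 online CPUs in balanced mode, the driver would
 * request 8 + 24 = 32 reply queues; in latency mode the same system would
 * request 1 + 24 = 25.
 */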
6159*4882a593Smuzhiyun
6160*4882a593Smuzhiyun intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
6161*4882a593Smuzhiyun true : false;
6162*4882a593Smuzhiyun if (intr_coalescing &&
6163*4882a593Smuzhiyun (num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
6164*4882a593Smuzhiyun (instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
6165*4882a593Smuzhiyun instance->perf_mode = MR_BALANCED_PERF_MODE;
6166*4882a593Smuzhiyun else
6167*4882a593Smuzhiyun instance->perf_mode = MR_LATENCY_PERF_MODE;
6168*4882a593Smuzhiyun
6169*4882a593Smuzhiyun
6170*4882a593Smuzhiyun if (instance->adapter_type == AERO_SERIES) {
6171*4882a593Smuzhiyun pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
6172*4882a593Smuzhiyun speed = lnksta & PCI_EXP_LNKSTA_CLS;
6173*4882a593Smuzhiyun
6174*4882a593Smuzhiyun /*
6175*4882a593Smuzhiyun * For Aero, if PCIe link speed is <16 GT/s, then driver should operate
6176*4882a593Smuzhiyun * in latency perf mode and enable R1 PCI bandwidth algorithm
6177*4882a593Smuzhiyun */
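/* PCI_EXP_LNKSTA_CLS encoding: 1 = 2.5 GT/s, 2 = 5 GT/s, 3 = 8 GT/s, 4 = 16 GT/s */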
6178*4882a593Smuzhiyun if (speed < 0x4) {
6179*4882a593Smuzhiyun instance->perf_mode = MR_LATENCY_PERF_MODE;
6180*4882a593Smuzhiyun fusion->pcie_bw_limitation = true;
6181*4882a593Smuzhiyun }
6182*4882a593Smuzhiyun
6183*4882a593Smuzhiyun /*
6184*4882a593Smuzhiyun * Performance mode settings provided through the module parameter perf_mode
6185*4882a593Smuzhiyun * take effect only for:
6186*4882a593Smuzhiyun * 1. The Aero family of adapters.
6187*4882a593Smuzhiyun * 2. When the user sets the module parameter perf_mode in the range 0-2.
6188*4882a593Smuzhiyun */
6189*4882a593Smuzhiyun if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
6190*4882a593Smuzhiyun (perf_mode <= MR_LATENCY_PERF_MODE))
6191*4882a593Smuzhiyun instance->perf_mode = perf_mode;
6192*4882a593Smuzhiyun /*
6193*4882a593Smuzhiyun * If intr coalescing is not supported by controller FW, then IOPS
6194*4882a593Smuzhiyun * and Balanced modes are not feasible.
6195*4882a593Smuzhiyun */
6196*4882a593Smuzhiyun if (!intr_coalescing)
6197*4882a593Smuzhiyun instance->perf_mode = MR_LATENCY_PERF_MODE;
6198*4882a593Smuzhiyun
6199*4882a593Smuzhiyun }
6200*4882a593Smuzhiyun
6201*4882a593Smuzhiyun if (instance->perf_mode == MR_BALANCED_PERF_MODE)
6202*4882a593Smuzhiyun instance->low_latency_index_start =
6203*4882a593Smuzhiyun MR_HIGH_IOPS_QUEUE_COUNT;
6204*4882a593Smuzhiyun else
6205*4882a593Smuzhiyun instance->low_latency_index_start = 1;
6206*4882a593Smuzhiyun
6207*4882a593Smuzhiyun num_msix_req = num_online_cpus() + instance->low_latency_index_start;
6208*4882a593Smuzhiyun
6209*4882a593Smuzhiyun instance->msix_vectors = min(num_msix_req,
6210*4882a593Smuzhiyun instance->msix_vectors);
6211*4882a593Smuzhiyun
6212*4882a593Smuzhiyun megasas_alloc_irq_vectors(instance);
6213*4882a593Smuzhiyun if (!instance->msix_vectors)
6214*4882a593Smuzhiyun instance->msix_load_balance = false;
6215*4882a593Smuzhiyun }
6216*4882a593Smuzhiyun /*
6217*4882a593Smuzhiyun * MSI-X host index 0 is common for all adapters.
6218*4882a593Smuzhiyun * It is used for all MPT-based adapters.
6219*4882a593Smuzhiyun */
6220*4882a593Smuzhiyun if (instance->msix_combined) {
6221*4882a593Smuzhiyun instance->reply_post_host_index_addr[0] =
6222*4882a593Smuzhiyun (u32 *)((u8 *)instance->reg_set +
6223*4882a593Smuzhiyun MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
6224*4882a593Smuzhiyun } else {
6225*4882a593Smuzhiyun instance->reply_post_host_index_addr[0] =
6226*4882a593Smuzhiyun (u32 *)((u8 *)instance->reg_set +
6227*4882a593Smuzhiyun MPI2_REPLY_POST_HOST_INDEX_OFFSET);
6228*4882a593Smuzhiyun }
6229*4882a593Smuzhiyun
6230*4882a593Smuzhiyun if (!instance->msix_vectors) {
6231*4882a593Smuzhiyun i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
6232*4882a593Smuzhiyun if (i < 0)
6233*4882a593Smuzhiyun goto fail_init_adapter;
6234*4882a593Smuzhiyun }
6235*4882a593Smuzhiyun
6236*4882a593Smuzhiyun megasas_setup_reply_map(instance);
6237*4882a593Smuzhiyun
6238*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
6239*4882a593Smuzhiyun "current msix/online cpus\t: (%d/%d)\n",
6240*4882a593Smuzhiyun instance->msix_vectors, (unsigned int)num_online_cpus());
6241*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
6242*4882a593Smuzhiyun "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
6243*4882a593Smuzhiyun
6244*4882a593Smuzhiyun tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6245*4882a593Smuzhiyun (unsigned long)instance);
6246*4882a593Smuzhiyun
6247*4882a593Smuzhiyun /*
6248*4882a593Smuzhiyun * Below are the default values for legacy firmware
6249*4882a593Smuzhiyun * (non-fusion based controllers).
6250*4882a593Smuzhiyun */
6251*4882a593Smuzhiyun instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
6252*4882a593Smuzhiyun instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
6253*4882a593Smuzhiyun /* Get operational params, sge flags, send init cmd to controller */
6254*4882a593Smuzhiyun if (instance->instancet->init_adapter(instance))
6255*4882a593Smuzhiyun goto fail_init_adapter;
6256*4882a593Smuzhiyun
6257*4882a593Smuzhiyun if (instance->adapter_type >= VENTURA_SERIES) {
6258*4882a593Smuzhiyun scratch_pad_3 =
6259*4882a593Smuzhiyun megasas_readl(instance,
6260*4882a593Smuzhiyun &instance->reg_set->outbound_scratch_pad_3);
6261*4882a593Smuzhiyun if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
6262*4882a593Smuzhiyun MR_DEFAULT_NVME_PAGE_SHIFT)
6263*4882a593Smuzhiyun instance->nvme_page_size =
6264*4882a593Smuzhiyun (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
6265*4882a593Smuzhiyun
6266*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
6267*4882a593Smuzhiyun "NVME page size\t: (%d)\n", instance->nvme_page_size);
6268*4882a593Smuzhiyun }
6269*4882a593Smuzhiyun
6270*4882a593Smuzhiyun if (instance->msix_vectors ?
6271*4882a593Smuzhiyun megasas_setup_irqs_msix(instance, 1) :
6272*4882a593Smuzhiyun megasas_setup_irqs_ioapic(instance))
6273*4882a593Smuzhiyun goto fail_init_adapter;
6274*4882a593Smuzhiyun
6275*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES)
6276*4882a593Smuzhiyun megasas_setup_irq_poll(instance);
6277*4882a593Smuzhiyun
6278*4882a593Smuzhiyun instance->instancet->enable_intr(instance);
6279*4882a593Smuzhiyun
6280*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "INIT adapter done\n");
6281*4882a593Smuzhiyun
6282*4882a593Smuzhiyun megasas_setup_jbod_map(instance);
6283*4882a593Smuzhiyun
6284*4882a593Smuzhiyun if (megasas_get_device_list(instance) != SUCCESS) {
6285*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
6286*4882a593Smuzhiyun "%s: megasas_get_device_list failed\n",
6287*4882a593Smuzhiyun __func__);
6288*4882a593Smuzhiyun goto fail_get_ld_pd_list;
6289*4882a593Smuzhiyun }
6290*4882a593Smuzhiyun
6291*4882a593Smuzhiyun /* stream detection initialization */
6292*4882a593Smuzhiyun if (instance->adapter_type >= VENTURA_SERIES) {
6293*4882a593Smuzhiyun fusion->stream_detect_by_ld =
6294*4882a593Smuzhiyun kcalloc(MAX_LOGICAL_DRIVES_EXT,
6295*4882a593Smuzhiyun sizeof(struct LD_STREAM_DETECT *),
6296*4882a593Smuzhiyun GFP_KERNEL);
6297*4882a593Smuzhiyun if (!fusion->stream_detect_by_ld) {
6298*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
6299*4882a593Smuzhiyun "unable to allocate stream detection for pool of LDs\n");
6300*4882a593Smuzhiyun goto fail_get_ld_pd_list;
6301*4882a593Smuzhiyun }
6302*4882a593Smuzhiyun for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
6303*4882a593Smuzhiyun fusion->stream_detect_by_ld[i] =
6304*4882a593Smuzhiyun kzalloc(sizeof(struct LD_STREAM_DETECT),
6305*4882a593Smuzhiyun GFP_KERNEL);
6306*4882a593Smuzhiyun if (!fusion->stream_detect_by_ld[i]) {
6307*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
6308*4882a593Smuzhiyun "unable to allocate stream detect by LD\n ");
6309*4882a593Smuzhiyun for (j = 0; j < i; ++j)
6310*4882a593Smuzhiyun kfree(fusion->stream_detect_by_ld[j]);
6311*4882a593Smuzhiyun kfree(fusion->stream_detect_by_ld);
6312*4882a593Smuzhiyun fusion->stream_detect_by_ld = NULL;
6313*4882a593Smuzhiyun goto fail_get_ld_pd_list;
6314*4882a593Smuzhiyun }
6315*4882a593Smuzhiyun fusion->stream_detect_by_ld[i]->mru_bit_map
6316*4882a593Smuzhiyun = MR_STREAM_BITMAP;
6317*4882a593Smuzhiyun }
6318*4882a593Smuzhiyun }
6319*4882a593Smuzhiyun
6320*4882a593Smuzhiyun /*
6321*4882a593Smuzhiyun * Compute the max allowed sectors per IO: The controller info has two
6322*4882a593Smuzhiyun * limits on max sectors. Driver should use the minimum of these two.
6323*4882a593Smuzhiyun *
6324*4882a593Smuzhiyun * 1 << stripe_sz_ops.min = max sectors per strip
6325*4882a593Smuzhiyun *
6326*4882a593Smuzhiyun * Note that older firmware (< FW ver 30) didn't report the information
6327*4882a593Smuzhiyun * needed to calculate max_sectors_1, so that number always ended up as zero.
6328*4882a593Smuzhiyun */
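/*
 * Worked example (values are hypothetical): stripe_sz_ops.min = 7 and
 * max_strips_per_io = 42 give max_sectors_1 = 128 * 42 = 5376 sectors;
 * if max_request_size reports 4096, tmp_sectors becomes 4096.
 */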
6329*4882a593Smuzhiyun tmp_sectors = 0;
6330*4882a593Smuzhiyun ctrl_info = instance->ctrl_info_buf;
6331*4882a593Smuzhiyun
6332*4882a593Smuzhiyun max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
6333*4882a593Smuzhiyun le16_to_cpu(ctrl_info->max_strips_per_io);
6334*4882a593Smuzhiyun max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
6335*4882a593Smuzhiyun
6336*4882a593Smuzhiyun tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
6337*4882a593Smuzhiyun
6338*4882a593Smuzhiyun instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
6339*4882a593Smuzhiyun instance->passive = ctrl_info->cluster.passive;
6340*4882a593Smuzhiyun memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
6341*4882a593Smuzhiyun instance->UnevenSpanSupport =
6342*4882a593Smuzhiyun ctrl_info->adapterOperations2.supportUnevenSpans;
6343*4882a593Smuzhiyun if (instance->UnevenSpanSupport) {
6344*4882a593Smuzhiyun struct fusion_context *fusion = instance->ctrl_context;
6345*4882a593Smuzhiyun if (MR_ValidateMapInfo(instance, instance->map_id))
6346*4882a593Smuzhiyun fusion->fast_path_io = 1;
6347*4882a593Smuzhiyun else
6348*4882a593Smuzhiyun fusion->fast_path_io = 0;
6349*4882a593Smuzhiyun
6350*4882a593Smuzhiyun }
6351*4882a593Smuzhiyun if (ctrl_info->host_interface.SRIOV) {
6352*4882a593Smuzhiyun instance->requestorId = ctrl_info->iov.requestorId;
6353*4882a593Smuzhiyun if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
6354*4882a593Smuzhiyun if (!ctrl_info->adapterOperations2.activePassive)
6355*4882a593Smuzhiyun instance->PlasmaFW111 = 1;
6356*4882a593Smuzhiyun
6357*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
6358*4882a593Smuzhiyun instance->PlasmaFW111 ? "1.11" : "new");
6359*4882a593Smuzhiyun
6360*4882a593Smuzhiyun if (instance->PlasmaFW111) {
6361*4882a593Smuzhiyun iovPtr = (struct IOV_111 *)
6362*4882a593Smuzhiyun ((unsigned char *)ctrl_info + IOV_111_OFFSET);
6363*4882a593Smuzhiyun instance->requestorId = iovPtr->requestorId;
6364*4882a593Smuzhiyun }
6365*4882a593Smuzhiyun }
6366*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
6367*4882a593Smuzhiyun instance->requestorId);
6368*4882a593Smuzhiyun }
6369*4882a593Smuzhiyun
6370*4882a593Smuzhiyun instance->crash_dump_fw_support =
6371*4882a593Smuzhiyun ctrl_info->adapterOperations3.supportCrashDump;
6372*4882a593Smuzhiyun instance->crash_dump_drv_support =
6373*4882a593Smuzhiyun (instance->crash_dump_fw_support &&
6374*4882a593Smuzhiyun instance->crash_dump_buf);
6375*4882a593Smuzhiyun if (instance->crash_dump_drv_support)
6376*4882a593Smuzhiyun megasas_set_crash_dump_params(instance,
6377*4882a593Smuzhiyun MR_CRASH_BUF_TURN_OFF);
6378*4882a593Smuzhiyun
6379*4882a593Smuzhiyun else {
6380*4882a593Smuzhiyun if (instance->crash_dump_buf)
6381*4882a593Smuzhiyun dma_free_coherent(&instance->pdev->dev,
6382*4882a593Smuzhiyun CRASH_DMA_BUF_SIZE,
6383*4882a593Smuzhiyun instance->crash_dump_buf,
6384*4882a593Smuzhiyun instance->crash_dump_h);
6385*4882a593Smuzhiyun instance->crash_dump_buf = NULL;
6386*4882a593Smuzhiyun }
6387*4882a593Smuzhiyun
6388*4882a593Smuzhiyun if (instance->snapdump_wait_time) {
6389*4882a593Smuzhiyun megasas_get_snapdump_properties(instance);
6390*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
6391*4882a593Smuzhiyun instance->snapdump_wait_time);
6392*4882a593Smuzhiyun }
6393*4882a593Smuzhiyun
6394*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
6395*4882a593Smuzhiyun "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
6396*4882a593Smuzhiyun le16_to_cpu(ctrl_info->pci.vendor_id),
6397*4882a593Smuzhiyun le16_to_cpu(ctrl_info->pci.device_id),
6398*4882a593Smuzhiyun le16_to_cpu(ctrl_info->pci.sub_vendor_id),
6399*4882a593Smuzhiyun le16_to_cpu(ctrl_info->pci.sub_device_id));
6400*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "unevenspan support : %s\n",
6401*4882a593Smuzhiyun instance->UnevenSpanSupport ? "yes" : "no");
6402*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "firmware crash dump : %s\n",
6403*4882a593Smuzhiyun instance->crash_dump_drv_support ? "yes" : "no");
6404*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "JBOD sequence map : %s\n",
6405*4882a593Smuzhiyun instance->use_seqnum_jbod_fp ? "enabled" : "disabled");
6406*4882a593Smuzhiyun
6407*4882a593Smuzhiyun instance->max_sectors_per_req = instance->max_num_sge *
6408*4882a593Smuzhiyun SGE_BUFFER_SIZE / 512;
6409*4882a593Smuzhiyun if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
6410*4882a593Smuzhiyun instance->max_sectors_per_req = tmp_sectors;
6411*4882a593Smuzhiyun
6412*4882a593Smuzhiyun /* Check for valid throttlequeuedepth module parameter */
6413*4882a593Smuzhiyun if (throttlequeuedepth &&
6414*4882a593Smuzhiyun throttlequeuedepth <= instance->max_scsi_cmds)
6415*4882a593Smuzhiyun instance->throttlequeuedepth = throttlequeuedepth;
6416*4882a593Smuzhiyun else
6417*4882a593Smuzhiyun instance->throttlequeuedepth =
6418*4882a593Smuzhiyun MEGASAS_THROTTLE_QUEUE_DEPTH;
6419*4882a593Smuzhiyun
6420*4882a593Smuzhiyun if ((resetwaittime < 1) ||
6421*4882a593Smuzhiyun (resetwaittime > MEGASAS_RESET_WAIT_TIME))
6422*4882a593Smuzhiyun resetwaittime = MEGASAS_RESET_WAIT_TIME;
6423*4882a593Smuzhiyun
6424*4882a593Smuzhiyun if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
6425*4882a593Smuzhiyun scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
6426*4882a593Smuzhiyun
6427*4882a593Smuzhiyun /* Launch SR-IOV heartbeat timer */
6428*4882a593Smuzhiyun if (instance->requestorId) {
6429*4882a593Smuzhiyun if (!megasas_sriov_start_heartbeat(instance, 1)) {
6430*4882a593Smuzhiyun megasas_start_timer(instance);
6431*4882a593Smuzhiyun } else {
6432*4882a593Smuzhiyun instance->skip_heartbeat_timer_del = 1;
6433*4882a593Smuzhiyun goto fail_get_ld_pd_list;
6434*4882a593Smuzhiyun }
6435*4882a593Smuzhiyun }
6436*4882a593Smuzhiyun
6437*4882a593Smuzhiyun /*
6438*4882a593Smuzhiyun * Create and start the watchdog thread, which monitors the
6439*4882a593Smuzhiyun * controller state every second and triggers OCR when
6440*4882a593Smuzhiyun * it enters the fault state
6441*4882a593Smuzhiyun */
6442*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES)
6443*4882a593Smuzhiyun if (megasas_fusion_start_watchdog(instance) != SUCCESS)
6444*4882a593Smuzhiyun goto fail_start_watchdog;
6445*4882a593Smuzhiyun
6446*4882a593Smuzhiyun return 0;
6447*4882a593Smuzhiyun
6448*4882a593Smuzhiyun fail_start_watchdog:
6449*4882a593Smuzhiyun if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6450*4882a593Smuzhiyun del_timer_sync(&instance->sriov_heartbeat_timer);
6451*4882a593Smuzhiyun fail_get_ld_pd_list:
6452*4882a593Smuzhiyun instance->instancet->disable_intr(instance);
6453*4882a593Smuzhiyun megasas_destroy_irqs(instance);
6454*4882a593Smuzhiyun fail_init_adapter:
6455*4882a593Smuzhiyun if (instance->msix_vectors)
6456*4882a593Smuzhiyun pci_free_irq_vectors(instance->pdev);
6457*4882a593Smuzhiyun instance->msix_vectors = 0;
6458*4882a593Smuzhiyun fail_alloc_dma_buf:
6459*4882a593Smuzhiyun megasas_free_ctrl_dma_buffers(instance);
6460*4882a593Smuzhiyun megasas_free_ctrl_mem(instance);
6461*4882a593Smuzhiyun fail_ready_state:
6462*4882a593Smuzhiyun iounmap(instance->reg_set);
6463*4882a593Smuzhiyun
6464*4882a593Smuzhiyun fail_ioremap:
6465*4882a593Smuzhiyun pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6466*4882a593Smuzhiyun
6467*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6468*4882a593Smuzhiyun __func__, __LINE__);
6469*4882a593Smuzhiyun return -EINVAL;
6470*4882a593Smuzhiyun }
6471*4882a593Smuzhiyun
6472*4882a593Smuzhiyun /**
6473*4882a593Smuzhiyun * megasas_release_mfi - Reverses the FW initialization
6474*4882a593Smuzhiyun * @instance: Adapter soft state
6475*4882a593Smuzhiyun */
6476*4882a593Smuzhiyun static void megasas_release_mfi(struct megasas_instance *instance)
6477*4882a593Smuzhiyun {
6478*4882a593Smuzhiyun u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
6479*4882a593Smuzhiyun
6480*4882a593Smuzhiyun if (instance->reply_queue)
6481*4882a593Smuzhiyun dma_free_coherent(&instance->pdev->dev, reply_q_sz,
6482*4882a593Smuzhiyun instance->reply_queue, instance->reply_queue_h);
6483*4882a593Smuzhiyun
6484*4882a593Smuzhiyun megasas_free_cmds(instance);
6485*4882a593Smuzhiyun
6486*4882a593Smuzhiyun iounmap(instance->reg_set);
6487*4882a593Smuzhiyun
6488*4882a593Smuzhiyun pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6489*4882a593Smuzhiyun }
6490*4882a593Smuzhiyun
6491*4882a593Smuzhiyun /**
6492*4882a593Smuzhiyun * megasas_get_seq_num - Gets latest event sequence numbers
6493*4882a593Smuzhiyun * @instance: Adapter soft state
6494*4882a593Smuzhiyun * @eli: FW event log sequence numbers information
6495*4882a593Smuzhiyun *
6496*4882a593Smuzhiyun * FW maintains a log of all events in a non-volatile area. Upper layers would
6497*4882a593Smuzhiyun * usually find out the latest sequence number of the events, the seq number at
6498*4882a593Smuzhiyun * the boot etc. They would "read" all the events below the latest seq number
6499*4882a593Smuzhiyun * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
6500*4882a593Smuzhiyun * number), they would subscribe to AEN (asynchronous event notification) and
6501*4882a593Smuzhiyun * wait for the events to happen.
6502*4882a593Smuzhiyun */
6503*4882a593Smuzhiyun static int
6504*4882a593Smuzhiyun megasas_get_seq_num(struct megasas_instance *instance,
6505*4882a593Smuzhiyun struct megasas_evt_log_info *eli)
6506*4882a593Smuzhiyun {
6507*4882a593Smuzhiyun struct megasas_cmd *cmd;
6508*4882a593Smuzhiyun struct megasas_dcmd_frame *dcmd;
6509*4882a593Smuzhiyun struct megasas_evt_log_info *el_info;
6510*4882a593Smuzhiyun dma_addr_t el_info_h = 0;
6511*4882a593Smuzhiyun int ret;
6512*4882a593Smuzhiyun
6513*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
6514*4882a593Smuzhiyun
6515*4882a593Smuzhiyun if (!cmd) {
6516*4882a593Smuzhiyun return -ENOMEM;
6517*4882a593Smuzhiyun }
6518*4882a593Smuzhiyun
6519*4882a593Smuzhiyun dcmd = &cmd->frame->dcmd;
6520*4882a593Smuzhiyun el_info = dma_alloc_coherent(&instance->pdev->dev,
6521*4882a593Smuzhiyun sizeof(struct megasas_evt_log_info),
6522*4882a593Smuzhiyun &el_info_h, GFP_KERNEL);
6523*4882a593Smuzhiyun if (!el_info) {
6524*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
6525*4882a593Smuzhiyun return -ENOMEM;
6526*4882a593Smuzhiyun }
6527*4882a593Smuzhiyun
6528*4882a593Smuzhiyun memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6529*4882a593Smuzhiyun
6530*4882a593Smuzhiyun dcmd->cmd = MFI_CMD_DCMD;
6531*4882a593Smuzhiyun dcmd->cmd_status = 0x0;
6532*4882a593Smuzhiyun dcmd->sge_count = 1;
6533*4882a593Smuzhiyun dcmd->flags = MFI_FRAME_DIR_READ;
6534*4882a593Smuzhiyun dcmd->timeout = 0;
6535*4882a593Smuzhiyun dcmd->pad_0 = 0;
6536*4882a593Smuzhiyun dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
6537*4882a593Smuzhiyun dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
6538*4882a593Smuzhiyun
6539*4882a593Smuzhiyun megasas_set_dma_settings(instance, dcmd, el_info_h,
6540*4882a593Smuzhiyun sizeof(struct megasas_evt_log_info));
6541*4882a593Smuzhiyun
6542*4882a593Smuzhiyun ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
6543*4882a593Smuzhiyun if (ret != DCMD_SUCCESS) {
6544*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6545*4882a593Smuzhiyun __func__, __LINE__);
6546*4882a593Smuzhiyun goto dcmd_failed;
6547*4882a593Smuzhiyun }
6548*4882a593Smuzhiyun
6549*4882a593Smuzhiyun /*
6550*4882a593Smuzhiyun * Copy the data back into the caller's buffer
6551*4882a593Smuzhiyun */
6552*4882a593Smuzhiyun eli->newest_seq_num = el_info->newest_seq_num;
6553*4882a593Smuzhiyun eli->oldest_seq_num = el_info->oldest_seq_num;
6554*4882a593Smuzhiyun eli->clear_seq_num = el_info->clear_seq_num;
6555*4882a593Smuzhiyun eli->shutdown_seq_num = el_info->shutdown_seq_num;
6556*4882a593Smuzhiyun eli->boot_seq_num = el_info->boot_seq_num;
6557*4882a593Smuzhiyun
6558*4882a593Smuzhiyun dcmd_failed:
6559*4882a593Smuzhiyun dma_free_coherent(&instance->pdev->dev,
6560*4882a593Smuzhiyun sizeof(struct megasas_evt_log_info),
6561*4882a593Smuzhiyun el_info, el_info_h);
6562*4882a593Smuzhiyun
6563*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
6564*4882a593Smuzhiyun
6565*4882a593Smuzhiyun return ret;
6566*4882a593Smuzhiyun }
6567*4882a593Smuzhiyun
6568*4882a593Smuzhiyun /**
6569*4882a593Smuzhiyun * megasas_register_aen - Registers for asynchronous event notification
6570*4882a593Smuzhiyun * @instance: Adapter soft state
6571*4882a593Smuzhiyun * @seq_num: The starting sequence number
6572*4882a593Smuzhiyun * @class_locale_word: Class of the event
6573*4882a593Smuzhiyun *
6574*4882a593Smuzhiyun * This function subscribes for AEN for events beyond the @seq_num. It requests
6575*4882a593Smuzhiyun * to be notified if and only if the event is of type @class_locale_word
6576*4882a593Smuzhiyun */
6577*4882a593Smuzhiyun static int
6578*4882a593Smuzhiyun megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
6579*4882a593Smuzhiyun u32 class_locale_word)
6580*4882a593Smuzhiyun {
6581*4882a593Smuzhiyun int ret_val;
6582*4882a593Smuzhiyun struct megasas_cmd *cmd;
6583*4882a593Smuzhiyun struct megasas_dcmd_frame *dcmd;
6584*4882a593Smuzhiyun union megasas_evt_class_locale curr_aen;
6585*4882a593Smuzhiyun union megasas_evt_class_locale prev_aen;
6586*4882a593Smuzhiyun
6587*4882a593Smuzhiyun /*
6588*4882a593Smuzhiyun * If there is an AEN pending already (aen_cmd), check if the
6589*4882a593Smuzhiyun * class_locale of that pending AEN is inclusive of the new
6590*4882a593Smuzhiyun * AEN request we currently have. If it is, then we don't have
6591*4882a593Smuzhiyun * to do anything. In other words, whichever events the current
6592*4882a593Smuzhiyun * AEN request is subscribing to, have already been subscribed
6593*4882a593Smuzhiyun * to.
6594*4882a593Smuzhiyun *
6595*4882a593Smuzhiyun * If the old_cmd is _not_ inclusive, then we have to abort
6596*4882a593Smuzhiyun * that command, form a class_locale that is superset of both
6597*4882a593Smuzhiyun * old and current and re-issue to the FW
6598*4882a593Smuzhiyun */
6599*4882a593Smuzhiyun
6600*4882a593Smuzhiyun curr_aen.word = class_locale_word;
6601*4882a593Smuzhiyun
6602*4882a593Smuzhiyun if (instance->aen_cmd) {
6603*4882a593Smuzhiyun
6604*4882a593Smuzhiyun prev_aen.word =
6605*4882a593Smuzhiyun le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
6606*4882a593Smuzhiyun
6607*4882a593Smuzhiyun if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
6608*4882a593Smuzhiyun (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
6609*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
6610*4882a593Smuzhiyun "%s %d out of range class %d sent by application\n",
6611*4882a593Smuzhiyun __func__, __LINE__, curr_aen.members.class);
6612*4882a593Smuzhiyun return 0;
6613*4882a593Smuzhiyun }
6614*4882a593Smuzhiyun
6615*4882a593Smuzhiyun /*
6616*4882a593Smuzhiyun * A class whose enum value is smaller is inclusive of all
6617*4882a593Smuzhiyun * higher values. If a PROGRESS (= -1) was previously
6618*4882a593Smuzhiyun * registered, then new registration requests for higher
6619*4882a593Smuzhiyun * classes need not be sent to FW. They are automatically
6620*4882a593Smuzhiyun * included.
6621*4882a593Smuzhiyun *
6622*4882a593Smuzhiyun * Locale numbers don't have such hierarchy. They are bitmap
6623*4882a593Smuzhiyun * values
6624*4882a593Smuzhiyun */
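/*
 * For example (hypothetical values): a pending registration for class
 * PROGRESS with locale 0x0001 already covers a new request for class
 * CRITICAL with locale 0x0001, so nothing is re-issued; a new locale bit
 * such as 0x0008 forces an abort and re-registration with the merged
 * locale 0x0009.
 */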
6625*4882a593Smuzhiyun if ((prev_aen.members.class <= curr_aen.members.class) &&
6626*4882a593Smuzhiyun !((prev_aen.members.locale & curr_aen.members.locale) ^
6627*4882a593Smuzhiyun curr_aen.members.locale)) {
6628*4882a593Smuzhiyun /*
6629*4882a593Smuzhiyun * Previously issued event registration includes
6630*4882a593Smuzhiyun * current request. Nothing to do.
6631*4882a593Smuzhiyun */
6632*4882a593Smuzhiyun return 0;
6633*4882a593Smuzhiyun } else {
6634*4882a593Smuzhiyun curr_aen.members.locale |= prev_aen.members.locale;
6635*4882a593Smuzhiyun
6636*4882a593Smuzhiyun if (prev_aen.members.class < curr_aen.members.class)
6637*4882a593Smuzhiyun curr_aen.members.class = prev_aen.members.class;
6638*4882a593Smuzhiyun
6639*4882a593Smuzhiyun instance->aen_cmd->abort_aen = 1;
6640*4882a593Smuzhiyun ret_val = megasas_issue_blocked_abort_cmd(instance,
6641*4882a593Smuzhiyun instance->
6642*4882a593Smuzhiyun aen_cmd, 30);
6643*4882a593Smuzhiyun
6644*4882a593Smuzhiyun if (ret_val) {
6645*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
6646*4882a593Smuzhiyun "previous AEN command\n");
6647*4882a593Smuzhiyun return ret_val;
6648*4882a593Smuzhiyun }
6649*4882a593Smuzhiyun }
6650*4882a593Smuzhiyun }
6651*4882a593Smuzhiyun
6652*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
6653*4882a593Smuzhiyun
6654*4882a593Smuzhiyun if (!cmd)
6655*4882a593Smuzhiyun return -ENOMEM;
6656*4882a593Smuzhiyun
6657*4882a593Smuzhiyun dcmd = &cmd->frame->dcmd;
6658*4882a593Smuzhiyun
6659*4882a593Smuzhiyun memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
6660*4882a593Smuzhiyun
6661*4882a593Smuzhiyun /*
6662*4882a593Smuzhiyun * Prepare DCMD for aen registration
6663*4882a593Smuzhiyun */
6664*4882a593Smuzhiyun memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6665*4882a593Smuzhiyun
6666*4882a593Smuzhiyun dcmd->cmd = MFI_CMD_DCMD;
6667*4882a593Smuzhiyun dcmd->cmd_status = 0x0;
6668*4882a593Smuzhiyun dcmd->sge_count = 1;
6669*4882a593Smuzhiyun dcmd->flags = MFI_FRAME_DIR_READ;
6670*4882a593Smuzhiyun dcmd->timeout = 0;
6671*4882a593Smuzhiyun dcmd->pad_0 = 0;
6672*4882a593Smuzhiyun dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
6673*4882a593Smuzhiyun dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
6674*4882a593Smuzhiyun dcmd->mbox.w[0] = cpu_to_le32(seq_num);
6675*4882a593Smuzhiyun instance->last_seq_num = seq_num;
6676*4882a593Smuzhiyun dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
6677*4882a593Smuzhiyun
6678*4882a593Smuzhiyun megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
6679*4882a593Smuzhiyun sizeof(struct megasas_evt_detail));
6680*4882a593Smuzhiyun
6681*4882a593Smuzhiyun if (instance->aen_cmd != NULL) {
6682*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
6683*4882a593Smuzhiyun return 0;
6684*4882a593Smuzhiyun }
6685*4882a593Smuzhiyun
6686*4882a593Smuzhiyun /*
6687*4882a593Smuzhiyun * Store reference to the cmd used to register for AEN. When an
6688*4882a593Smuzhiyun * application wants us to register for AEN, we have to abort this
6689*4882a593Smuzhiyun * cmd and re-register with a new EVENT LOCALE supplied by that app
6690*4882a593Smuzhiyun */
6691*4882a593Smuzhiyun instance->aen_cmd = cmd;
6692*4882a593Smuzhiyun
6693*4882a593Smuzhiyun /*
6694*4882a593Smuzhiyun * Issue the aen registration frame
6695*4882a593Smuzhiyun */
6696*4882a593Smuzhiyun instance->instancet->issue_dcmd(instance, cmd);
6697*4882a593Smuzhiyun
6698*4882a593Smuzhiyun return 0;
6699*4882a593Smuzhiyun }
6700*4882a593Smuzhiyun
6701*4882a593Smuzhiyun /* megasas_get_target_prop - Send DCMD with below details to firmware.
6702*4882a593Smuzhiyun *
6703*4882a593Smuzhiyun * This DCMD will fetch a few properties of the LD/system PD defined
6704*4882a593Smuzhiyun * in MR_TARGET_DEV_PROPERTIES, e.g. queue depth and MDTS value.
6705*4882a593Smuzhiyun *
6706*4882a593Smuzhiyun * The DCMD is sent by the driver whenever a new target is added to the OS.
6707*4882a593Smuzhiyun *
6708*4882a593Smuzhiyun * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP
6709*4882a593Smuzhiyun * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD.
6710*4882a593Smuzhiyun * 0 = system PD, 1 = LD.
6711*4882a593Smuzhiyun * dcmd.mbox.s[1] - TargetID for LD/system PD.
6712*4882a593Smuzhiyun * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES.
6713*4882a593Smuzhiyun *
6714*4882a593Smuzhiyun * @instance: Adapter soft state
6715*4882a593Smuzhiyun * @sdev: OS provided scsi device
6716*4882a593Smuzhiyun *
6717*4882a593Smuzhiyun * Returns 0 on success non-zero on failure.
6718*4882a593Smuzhiyun */
6719*4882a593Smuzhiyun int
6720*4882a593Smuzhiyun megasas_get_target_prop(struct megasas_instance *instance,
6721*4882a593Smuzhiyun struct scsi_device *sdev)
6722*4882a593Smuzhiyun {
6723*4882a593Smuzhiyun int ret;
6724*4882a593Smuzhiyun struct megasas_cmd *cmd;
6725*4882a593Smuzhiyun struct megasas_dcmd_frame *dcmd;
6726*4882a593Smuzhiyun u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
6727*4882a593Smuzhiyun sdev->id;
6728*4882a593Smuzhiyun
6729*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
6730*4882a593Smuzhiyun
6731*4882a593Smuzhiyun if (!cmd) {
6732*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
6733*4882a593Smuzhiyun "Failed to get cmd %s\n", __func__);
6734*4882a593Smuzhiyun return -ENOMEM;
6735*4882a593Smuzhiyun }
6736*4882a593Smuzhiyun
6737*4882a593Smuzhiyun dcmd = &cmd->frame->dcmd;
6738*4882a593Smuzhiyun
6739*4882a593Smuzhiyun memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
6740*4882a593Smuzhiyun memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6741*4882a593Smuzhiyun dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
6742*4882a593Smuzhiyun
6743*4882a593Smuzhiyun dcmd->mbox.s[1] = cpu_to_le16(targetId);
6744*4882a593Smuzhiyun dcmd->cmd = MFI_CMD_DCMD;
6745*4882a593Smuzhiyun dcmd->cmd_status = 0xFF;
6746*4882a593Smuzhiyun dcmd->sge_count = 1;
6747*4882a593Smuzhiyun dcmd->flags = MFI_FRAME_DIR_READ;
6748*4882a593Smuzhiyun dcmd->timeout = 0;
6749*4882a593Smuzhiyun dcmd->pad_0 = 0;
6750*4882a593Smuzhiyun dcmd->data_xfer_len =
6751*4882a593Smuzhiyun cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
6752*4882a593Smuzhiyun dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
6753*4882a593Smuzhiyun
6754*4882a593Smuzhiyun megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
6755*4882a593Smuzhiyun sizeof(struct MR_TARGET_PROPERTIES));
6756*4882a593Smuzhiyun
6757*4882a593Smuzhiyun if ((instance->adapter_type != MFI_SERIES) &&
6758*4882a593Smuzhiyun !instance->mask_interrupts)
6759*4882a593Smuzhiyun ret = megasas_issue_blocked_cmd(instance,
6760*4882a593Smuzhiyun cmd, MFI_IO_TIMEOUT_SECS);
6761*4882a593Smuzhiyun else
6762*4882a593Smuzhiyun ret = megasas_issue_polled(instance, cmd);
6763*4882a593Smuzhiyun
6764*4882a593Smuzhiyun switch (ret) {
6765*4882a593Smuzhiyun case DCMD_TIMEOUT:
6766*4882a593Smuzhiyun switch (dcmd_timeout_ocr_possible(instance)) {
6767*4882a593Smuzhiyun case INITIATE_OCR:
6768*4882a593Smuzhiyun cmd->flags |= DRV_DCMD_SKIP_REFIRE;
6769*4882a593Smuzhiyun mutex_unlock(&instance->reset_mutex);
6770*4882a593Smuzhiyun megasas_reset_fusion(instance->host,
6771*4882a593Smuzhiyun MFI_IO_TIMEOUT_OCR);
6772*4882a593Smuzhiyun mutex_lock(&instance->reset_mutex);
6773*4882a593Smuzhiyun break;
6774*4882a593Smuzhiyun case KILL_ADAPTER:
6775*4882a593Smuzhiyun megaraid_sas_kill_hba(instance);
6776*4882a593Smuzhiyun break;
6777*4882a593Smuzhiyun case IGNORE_TIMEOUT:
6778*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
6779*4882a593Smuzhiyun "Ignore DCMD timeout: %s %d\n",
6780*4882a593Smuzhiyun __func__, __LINE__);
6781*4882a593Smuzhiyun break;
6782*4882a593Smuzhiyun }
6783*4882a593Smuzhiyun break;
6784*4882a593Smuzhiyun
6785*4882a593Smuzhiyun default:
6786*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
6787*4882a593Smuzhiyun }
6788*4882a593Smuzhiyun if (ret != DCMD_SUCCESS)
6789*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
6790*4882a593Smuzhiyun "return from %s %d return value %d\n",
6791*4882a593Smuzhiyun __func__, __LINE__, ret);
6792*4882a593Smuzhiyun
6793*4882a593Smuzhiyun return ret;
6794*4882a593Smuzhiyun }
6795*4882a593Smuzhiyun
6796*4882a593Smuzhiyun /**
6797*4882a593Smuzhiyun * megasas_start_aen - Subscribes to AEN during driver load time
6798*4882a593Smuzhiyun * @instance: Adapter soft state
6799*4882a593Smuzhiyun */
6800*4882a593Smuzhiyun static int megasas_start_aen(struct megasas_instance *instance)
6801*4882a593Smuzhiyun {
6802*4882a593Smuzhiyun struct megasas_evt_log_info eli;
6803*4882a593Smuzhiyun union megasas_evt_class_locale class_locale;
6804*4882a593Smuzhiyun
6805*4882a593Smuzhiyun /*
6806*4882a593Smuzhiyun * Get the latest sequence number from FW
6807*4882a593Smuzhiyun */
6808*4882a593Smuzhiyun memset(&eli, 0, sizeof(eli));
6809*4882a593Smuzhiyun
6810*4882a593Smuzhiyun if (megasas_get_seq_num(instance, &eli))
6811*4882a593Smuzhiyun return -1;
6812*4882a593Smuzhiyun
6813*4882a593Smuzhiyun /*
6814*4882a593Smuzhiyun * Register AEN with FW for latest sequence number plus 1
6815*4882a593Smuzhiyun */
6816*4882a593Smuzhiyun class_locale.members.reserved = 0;
6817*4882a593Smuzhiyun class_locale.members.locale = MR_EVT_LOCALE_ALL;
6818*4882a593Smuzhiyun class_locale.members.class = MR_EVT_CLASS_DEBUG;
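/*
 * MR_EVT_CLASS_DEBUG is the lowest event class, so together with
 * MR_EVT_LOCALE_ALL this subscribes to every event the firmware can
 * report, starting right after the newest sequence number.
 */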
6819*4882a593Smuzhiyun
6820*4882a593Smuzhiyun return megasas_register_aen(instance,
6821*4882a593Smuzhiyun le32_to_cpu(eli.newest_seq_num) + 1,
6822*4882a593Smuzhiyun class_locale.word);
6823*4882a593Smuzhiyun }
6824*4882a593Smuzhiyun
6825*4882a593Smuzhiyun /**
6826*4882a593Smuzhiyun * megasas_io_attach - Attaches this driver to SCSI mid-layer
6827*4882a593Smuzhiyun * @instance: Adapter soft state
6828*4882a593Smuzhiyun */
6829*4882a593Smuzhiyun static int megasas_io_attach(struct megasas_instance *instance)
6830*4882a593Smuzhiyun {
6831*4882a593Smuzhiyun struct Scsi_Host *host = instance->host;
6832*4882a593Smuzhiyun
6833*4882a593Smuzhiyun /*
6834*4882a593Smuzhiyun * Export parameters required by SCSI mid-layer
6835*4882a593Smuzhiyun */
6836*4882a593Smuzhiyun host->unique_id = instance->unique_id;
6837*4882a593Smuzhiyun host->can_queue = instance->max_scsi_cmds;
6838*4882a593Smuzhiyun host->this_id = instance->init_id;
6839*4882a593Smuzhiyun host->sg_tablesize = instance->max_num_sge;
6840*4882a593Smuzhiyun
6841*4882a593Smuzhiyun if (instance->fw_support_ieee)
6842*4882a593Smuzhiyun instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
6843*4882a593Smuzhiyun
6844*4882a593Smuzhiyun /*
6845*4882a593Smuzhiyun * Check if the module parameter value for max_sectors can be used
6846*4882a593Smuzhiyun */
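/*
 * The module parameter is honoured when it lowers the default limit.
 * The two GEN2 controllers additionally accept values up to
 * MEGASAS_MAX_SECTORS; anything else is ignored and only an
 * informational message is printed.
 */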
6847*4882a593Smuzhiyun if (max_sectors && max_sectors < instance->max_sectors_per_req)
6848*4882a593Smuzhiyun instance->max_sectors_per_req = max_sectors;
6849*4882a593Smuzhiyun else {
6850*4882a593Smuzhiyun if (max_sectors) {
6851*4882a593Smuzhiyun if (((instance->pdev->device ==
6852*4882a593Smuzhiyun PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
6853*4882a593Smuzhiyun (instance->pdev->device ==
6854*4882a593Smuzhiyun PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
6855*4882a593Smuzhiyun (max_sectors <= MEGASAS_MAX_SECTORS)) {
6856*4882a593Smuzhiyun instance->max_sectors_per_req = max_sectors;
6857*4882a593Smuzhiyun } else {
6858*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "max_sectors should be > 0"
6859*4882a593Smuzhiyun "and <= %d (or < 1MB for GEN2 controller)\n",
6860*4882a593Smuzhiyun instance->max_sectors_per_req);
6861*4882a593Smuzhiyun }
6862*4882a593Smuzhiyun }
6863*4882a593Smuzhiyun }
6864*4882a593Smuzhiyun
6865*4882a593Smuzhiyun host->max_sectors = instance->max_sectors_per_req;
6866*4882a593Smuzhiyun host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
6867*4882a593Smuzhiyun host->max_channel = MEGASAS_MAX_CHANNELS - 1;
6868*4882a593Smuzhiyun host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
6869*4882a593Smuzhiyun host->max_lun = MEGASAS_MAX_LUN;
6870*4882a593Smuzhiyun host->max_cmd_len = 16;
6871*4882a593Smuzhiyun
6872*4882a593Smuzhiyun /*
6873*4882a593Smuzhiyun * Notify the mid-layer about the new controller
6874*4882a593Smuzhiyun */
6875*4882a593Smuzhiyun if (scsi_add_host(host, &instance->pdev->dev)) {
6876*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
6877*4882a593Smuzhiyun "Failed to add host from %s %d\n",
6878*4882a593Smuzhiyun __func__, __LINE__);
6879*4882a593Smuzhiyun return -ENODEV;
6880*4882a593Smuzhiyun }
6881*4882a593Smuzhiyun
6882*4882a593Smuzhiyun return 0;
6883*4882a593Smuzhiyun }
6884*4882a593Smuzhiyun
6885*4882a593Smuzhiyun /**
6886*4882a593Smuzhiyun * megasas_set_dma_mask - Set DMA mask for supported controllers
6887*4882a593Smuzhiyun *
6888*4882a593Smuzhiyun * @instance: Adapter soft state
6889*4882a593Smuzhiyun * Description:
6890*4882a593Smuzhiyun *
6891*4882a593Smuzhiyun * For Ventura, the driver/FW operate with 63 bit DMA addresses.
6892*4882a593Smuzhiyun *
6893*4882a593Smuzhiyun * For Invader -
6894*4882a593Smuzhiyun * By default, the driver/FW operate with 32 bit DMA addresses
6895*4882a593Smuzhiyun * for consistent DMA mapping, but if setting the 32 bit consistent
6896*4882a593Smuzhiyun * DMA mask fails, the driver retries with a 63 bit consistent
6897*4882a593Smuzhiyun * mask, provided the FW is truly 63 bit DMA capable.
6898*4882a593Smuzhiyun *
6899*4882a593Smuzhiyun * For older controllers (Thunderbolt and MFI based adapters),
6900*4882a593Smuzhiyun * the driver/FW operate with 32 bit consistent DMA addresses.
6901*4882a593Smuzhiyun */
6902*4882a593Smuzhiyun static int
6903*4882a593Smuzhiyun megasas_set_dma_mask(struct megasas_instance *instance)
6904*4882a593Smuzhiyun {
6905*4882a593Smuzhiyun u64 consistent_mask;
6906*4882a593Smuzhiyun struct pci_dev *pdev;
6907*4882a593Smuzhiyun u32 scratch_pad_1;
6908*4882a593Smuzhiyun
6909*4882a593Smuzhiyun pdev = instance->pdev;
6910*4882a593Smuzhiyun consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
6911*4882a593Smuzhiyun DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
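/*
 * A 63 bit mask (rather than the full 64 bits) is used throughout,
 * reportedly because the firmware reserves the most significant
 * address bit for its own use.
 */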
6912*4882a593Smuzhiyun
6913*4882a593Smuzhiyun if (IS_DMA64) {
6914*4882a593Smuzhiyun if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
6915*4882a593Smuzhiyun dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6916*4882a593Smuzhiyun goto fail_set_dma_mask;
6917*4882a593Smuzhiyun
6918*4882a593Smuzhiyun if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
6919*4882a593Smuzhiyun (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
6920*4882a593Smuzhiyun dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
6921*4882a593Smuzhiyun /*
6922*4882a593Smuzhiyun * If the 32 bit consistent mask cannot be set either, fall back
6923*4882a593Smuzhiyun * to a 63 bit mask, provided the FW advertises 64 bit DMA capability.
6924*4882a593Smuzhiyun */
6925*4882a593Smuzhiyun scratch_pad_1 = megasas_readl
6926*4882a593Smuzhiyun (instance, &instance->reg_set->outbound_scratch_pad_1);
6927*4882a593Smuzhiyun
6928*4882a593Smuzhiyun if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
6929*4882a593Smuzhiyun goto fail_set_dma_mask;
6930*4882a593Smuzhiyun else if (dma_set_mask_and_coherent(&pdev->dev,
6931*4882a593Smuzhiyun DMA_BIT_MASK(63)))
6932*4882a593Smuzhiyun goto fail_set_dma_mask;
6933*4882a593Smuzhiyun }
6934*4882a593Smuzhiyun } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6935*4882a593Smuzhiyun goto fail_set_dma_mask;
6936*4882a593Smuzhiyun
6937*4882a593Smuzhiyun if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
6938*4882a593Smuzhiyun instance->consistent_mask_64bit = false;
6939*4882a593Smuzhiyun else
6940*4882a593Smuzhiyun instance->consistent_mask_64bit = true;
6941*4882a593Smuzhiyun
6942*4882a593Smuzhiyun dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6943*4882a593Smuzhiyun ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
6944*4882a593Smuzhiyun (instance->consistent_mask_64bit ? "63" : "32"));
6945*4882a593Smuzhiyun
6946*4882a593Smuzhiyun return 0;
6947*4882a593Smuzhiyun
6948*4882a593Smuzhiyun fail_set_dma_mask:
6949*4882a593Smuzhiyun dev_err(&pdev->dev, "Failed to set DMA mask\n");
6950*4882a593Smuzhiyun return -1;
6951*4882a593Smuzhiyun
6952*4882a593Smuzhiyun }
6953*4882a593Smuzhiyun
6954*4882a593Smuzhiyun /*
6955*4882a593Smuzhiyun * megasas_set_adapter_type - Set adapter type.
6956*4882a593Smuzhiyun * Supported controllers can be divided into
6957*4882a593Smuzhiyun * the following categories -
6958*4882a593Smuzhiyun * enum MR_ADAPTER_TYPE {
6959*4882a593Smuzhiyun * MFI_SERIES = 1,
6960*4882a593Smuzhiyun * THUNDERBOLT_SERIES = 2,
6961*4882a593Smuzhiyun * INVADER_SERIES = 3,
6962*4882a593Smuzhiyun * VENTURA_SERIES = 4,
6963*4882a593Smuzhiyun * AERO_SERIES = 5,
6964*4882a593Smuzhiyun * };
6965*4882a593Smuzhiyun * @instance: Adapter soft state
6966*4882a593Smuzhiyun * return: void
6967*4882a593Smuzhiyun */
6968*4882a593Smuzhiyun static inline void megasas_set_adapter_type(struct megasas_instance *instance)
6969*4882a593Smuzhiyun {
6970*4882a593Smuzhiyun if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
6971*4882a593Smuzhiyun (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
6972*4882a593Smuzhiyun instance->adapter_type = MFI_SERIES;
6973*4882a593Smuzhiyun } else {
6974*4882a593Smuzhiyun switch (instance->pdev->device) {
6975*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_AERO_10E1:
6976*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_AERO_10E2:
6977*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_AERO_10E5:
6978*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_AERO_10E6:
6979*4882a593Smuzhiyun instance->adapter_type = AERO_SERIES;
6980*4882a593Smuzhiyun break;
6981*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_VENTURA:
6982*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_CRUSADER:
6983*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_HARPOON:
6984*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_TOMCAT:
6985*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6986*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6987*4882a593Smuzhiyun instance->adapter_type = VENTURA_SERIES;
6988*4882a593Smuzhiyun break;
6989*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_FUSION:
6990*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_PLASMA:
6991*4882a593Smuzhiyun instance->adapter_type = THUNDERBOLT_SERIES;
6992*4882a593Smuzhiyun break;
6993*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_INVADER:
6994*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_INTRUDER:
6995*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_INTRUDER_24:
6996*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_CUTLASS_52:
6997*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_CUTLASS_53:
6998*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_FURY:
6999*4882a593Smuzhiyun instance->adapter_type = INVADER_SERIES;
7000*4882a593Smuzhiyun break;
7001*4882a593Smuzhiyun default: /* For all other supported controllers */
7002*4882a593Smuzhiyun instance->adapter_type = MFI_SERIES;
7003*4882a593Smuzhiyun break;
7004*4882a593Smuzhiyun }
7005*4882a593Smuzhiyun }
7006*4882a593Smuzhiyun }
7007*4882a593Smuzhiyun
7008*4882a593Smuzhiyun static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
7009*4882a593Smuzhiyun {
7010*4882a593Smuzhiyun instance->producer = dma_alloc_coherent(&instance->pdev->dev,
7011*4882a593Smuzhiyun sizeof(u32), &instance->producer_h, GFP_KERNEL);
7012*4882a593Smuzhiyun instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
7013*4882a593Smuzhiyun sizeof(u32), &instance->consumer_h, GFP_KERNEL);
7014*4882a593Smuzhiyun
7015*4882a593Smuzhiyun if (!instance->producer || !instance->consumer) {
7016*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
7017*4882a593Smuzhiyun "Failed to allocate memory for producer, consumer\n");
7018*4882a593Smuzhiyun return -1;
7019*4882a593Smuzhiyun }
7020*4882a593Smuzhiyun
7021*4882a593Smuzhiyun *instance->producer = 0;
7022*4882a593Smuzhiyun *instance->consumer = 0;
7023*4882a593Smuzhiyun return 0;
7024*4882a593Smuzhiyun }
7025*4882a593Smuzhiyun
7026*4882a593Smuzhiyun /**
7027*4882a593Smuzhiyun * megasas_alloc_ctrl_mem - Allocate per controller memory for core data
7028*4882a593Smuzhiyun * structures which are not common across MFI
7029*4882a593Smuzhiyun * adapters and fusion adapters.
7030*4882a593Smuzhiyun * For MFI based adapters, allocate producer and
7031*4882a593Smuzhiyun * consumer buffers. For fusion adapters, allocate
7032*4882a593Smuzhiyun * memory for fusion context.
7033*4882a593Smuzhiyun * @instance: Adapter soft state
7034*4882a593Smuzhiyun * return: 0 for SUCCESS
7035*4882a593Smuzhiyun */
7036*4882a593Smuzhiyun static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
7037*4882a593Smuzhiyun {
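/*
 * reply_map holds the CPU to reply-queue (MSI-X vector) mapping that
 * megasas_setup_reply_map() fills in later; it is needed for every
 * adapter type, so allocate it before the per-family memory.
 */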
7038*4882a593Smuzhiyun instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
7039*4882a593Smuzhiyun GFP_KERNEL);
7040*4882a593Smuzhiyun if (!instance->reply_map)
7041*4882a593Smuzhiyun return -ENOMEM;
7042*4882a593Smuzhiyun
7043*4882a593Smuzhiyun switch (instance->adapter_type) {
7044*4882a593Smuzhiyun case MFI_SERIES:
7045*4882a593Smuzhiyun if (megasas_alloc_mfi_ctrl_mem(instance))
7046*4882a593Smuzhiyun goto fail;
7047*4882a593Smuzhiyun break;
7048*4882a593Smuzhiyun case AERO_SERIES:
7049*4882a593Smuzhiyun case VENTURA_SERIES:
7050*4882a593Smuzhiyun case THUNDERBOLT_SERIES:
7051*4882a593Smuzhiyun case INVADER_SERIES:
7052*4882a593Smuzhiyun if (megasas_alloc_fusion_context(instance))
7053*4882a593Smuzhiyun goto fail;
7054*4882a593Smuzhiyun break;
7055*4882a593Smuzhiyun }
7056*4882a593Smuzhiyun
7057*4882a593Smuzhiyun return 0;
7058*4882a593Smuzhiyun fail:
7059*4882a593Smuzhiyun kfree(instance->reply_map);
7060*4882a593Smuzhiyun instance->reply_map = NULL;
7061*4882a593Smuzhiyun return -ENOMEM;
7062*4882a593Smuzhiyun }
7063*4882a593Smuzhiyun
7064*4882a593Smuzhiyun /*
7065*4882a593Smuzhiyun * megasas_free_ctrl_mem - Free fusion context for fusion adapters and
7066*4882a593Smuzhiyun * producer, consumer buffers for MFI adapters
7067*4882a593Smuzhiyun *
7068*4882a593Smuzhiyun * @instance - Adapter soft instance
7069*4882a593Smuzhiyun *
7070*4882a593Smuzhiyun */
7071*4882a593Smuzhiyun static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
7072*4882a593Smuzhiyun {
7073*4882a593Smuzhiyun kfree(instance->reply_map);
7074*4882a593Smuzhiyun if (instance->adapter_type == MFI_SERIES) {
7075*4882a593Smuzhiyun if (instance->producer)
7076*4882a593Smuzhiyun dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7077*4882a593Smuzhiyun instance->producer,
7078*4882a593Smuzhiyun instance->producer_h);
7079*4882a593Smuzhiyun if (instance->consumer)
7080*4882a593Smuzhiyun dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7081*4882a593Smuzhiyun instance->consumer,
7082*4882a593Smuzhiyun instance->consumer_h);
7083*4882a593Smuzhiyun } else {
7084*4882a593Smuzhiyun megasas_free_fusion_context(instance);
7085*4882a593Smuzhiyun }
7086*4882a593Smuzhiyun }
7087*4882a593Smuzhiyun
7088*4882a593Smuzhiyun /**
7089*4882a593Smuzhiyun * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during
7090*4882a593Smuzhiyun * driver load time
7091*4882a593Smuzhiyun *
7092*4882a593Smuzhiyun * @instance: Adapter soft instance
7093*4882a593Smuzhiyun *
7094*4882a593Smuzhiyun * @return: 0 for SUCCESS
7095*4882a593Smuzhiyun */
7096*4882a593Smuzhiyun static inline
7097*4882a593Smuzhiyun int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
7098*4882a593Smuzhiyun {
7099*4882a593Smuzhiyun struct pci_dev *pdev = instance->pdev;
7100*4882a593Smuzhiyun struct fusion_context *fusion = instance->ctrl_context;
7101*4882a593Smuzhiyun
7102*4882a593Smuzhiyun instance->evt_detail = dma_alloc_coherent(&pdev->dev,
7103*4882a593Smuzhiyun sizeof(struct megasas_evt_detail),
7104*4882a593Smuzhiyun &instance->evt_detail_h, GFP_KERNEL);
7105*4882a593Smuzhiyun
7106*4882a593Smuzhiyun if (!instance->evt_detail) {
7107*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
7108*4882a593Smuzhiyun "Failed to allocate event detail buffer\n");
7109*4882a593Smuzhiyun return -ENOMEM;
7110*4882a593Smuzhiyun }
7111*4882a593Smuzhiyun
7112*4882a593Smuzhiyun if (fusion) {
7113*4882a593Smuzhiyun fusion->ioc_init_request =
7114*4882a593Smuzhiyun dma_alloc_coherent(&pdev->dev,
7115*4882a593Smuzhiyun sizeof(struct MPI2_IOC_INIT_REQUEST),
7116*4882a593Smuzhiyun &fusion->ioc_init_request_phys,
7117*4882a593Smuzhiyun GFP_KERNEL);
7118*4882a593Smuzhiyun
7119*4882a593Smuzhiyun if (!fusion->ioc_init_request) {
7120*4882a593Smuzhiyun dev_err(&pdev->dev,
7121*4882a593Smuzhiyun "Failed to allocate PD list buffer\n");
7122*4882a593Smuzhiyun return -ENOMEM;
7123*4882a593Smuzhiyun }
7124*4882a593Smuzhiyun
7125*4882a593Smuzhiyun instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
7126*4882a593Smuzhiyun sizeof(struct MR_SNAPDUMP_PROPERTIES),
7127*4882a593Smuzhiyun &instance->snapdump_prop_h, GFP_KERNEL);
7128*4882a593Smuzhiyun
7129*4882a593Smuzhiyun if (!instance->snapdump_prop)
7130*4882a593Smuzhiyun dev_err(&pdev->dev,
7131*4882a593Smuzhiyun "Failed to allocate snapdump properties buffer\n");
7132*4882a593Smuzhiyun
7133*4882a593Smuzhiyun instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
7134*4882a593Smuzhiyun HOST_DEVICE_LIST_SZ,
7135*4882a593Smuzhiyun &instance->host_device_list_buf_h,
7136*4882a593Smuzhiyun GFP_KERNEL);
7137*4882a593Smuzhiyun
7138*4882a593Smuzhiyun if (!instance->host_device_list_buf) {
7139*4882a593Smuzhiyun dev_err(&pdev->dev,
7140*4882a593Smuzhiyun "Failed to allocate targetid list buffer\n");
7141*4882a593Smuzhiyun return -ENOMEM;
7142*4882a593Smuzhiyun }
7143*4882a593Smuzhiyun
7144*4882a593Smuzhiyun }
7145*4882a593Smuzhiyun
7146*4882a593Smuzhiyun instance->pd_list_buf =
7147*4882a593Smuzhiyun dma_alloc_coherent(&pdev->dev,
7148*4882a593Smuzhiyun MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7149*4882a593Smuzhiyun &instance->pd_list_buf_h, GFP_KERNEL);
7150*4882a593Smuzhiyun
7151*4882a593Smuzhiyun if (!instance->pd_list_buf) {
7152*4882a593Smuzhiyun dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
7153*4882a593Smuzhiyun return -ENOMEM;
7154*4882a593Smuzhiyun }
7155*4882a593Smuzhiyun
7156*4882a593Smuzhiyun instance->ctrl_info_buf =
7157*4882a593Smuzhiyun dma_alloc_coherent(&pdev->dev,
7158*4882a593Smuzhiyun sizeof(struct megasas_ctrl_info),
7159*4882a593Smuzhiyun &instance->ctrl_info_buf_h, GFP_KERNEL);
7160*4882a593Smuzhiyun
7161*4882a593Smuzhiyun if (!instance->ctrl_info_buf) {
7162*4882a593Smuzhiyun dev_err(&pdev->dev,
7163*4882a593Smuzhiyun "Failed to allocate controller info buffer\n");
7164*4882a593Smuzhiyun return -ENOMEM;
7165*4882a593Smuzhiyun }
7166*4882a593Smuzhiyun
7167*4882a593Smuzhiyun instance->ld_list_buf =
7168*4882a593Smuzhiyun dma_alloc_coherent(&pdev->dev,
7169*4882a593Smuzhiyun sizeof(struct MR_LD_LIST),
7170*4882a593Smuzhiyun &instance->ld_list_buf_h, GFP_KERNEL);
7171*4882a593Smuzhiyun
7172*4882a593Smuzhiyun if (!instance->ld_list_buf) {
7173*4882a593Smuzhiyun dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
7174*4882a593Smuzhiyun return -ENOMEM;
7175*4882a593Smuzhiyun }
7176*4882a593Smuzhiyun
7177*4882a593Smuzhiyun instance->ld_targetid_list_buf =
7178*4882a593Smuzhiyun dma_alloc_coherent(&pdev->dev,
7179*4882a593Smuzhiyun sizeof(struct MR_LD_TARGETID_LIST),
7180*4882a593Smuzhiyun &instance->ld_targetid_list_buf_h, GFP_KERNEL);
7181*4882a593Smuzhiyun
7182*4882a593Smuzhiyun if (!instance->ld_targetid_list_buf) {
7183*4882a593Smuzhiyun dev_err(&pdev->dev,
7184*4882a593Smuzhiyun "Failed to allocate LD targetid list buffer\n");
7185*4882a593Smuzhiyun return -ENOMEM;
7186*4882a593Smuzhiyun }
7187*4882a593Smuzhiyun
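/*
 * The buffers below are optional: in a kdump kernel (reset_devices set)
 * memory is scarce, so they are skipped entirely, and even in a normal
 * boot an allocation failure only produces a warning rather than
 * failing the load.
 */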
7188*4882a593Smuzhiyun if (!reset_devices) {
7189*4882a593Smuzhiyun instance->system_info_buf =
7190*4882a593Smuzhiyun dma_alloc_coherent(&pdev->dev,
7191*4882a593Smuzhiyun sizeof(struct MR_DRV_SYSTEM_INFO),
7192*4882a593Smuzhiyun &instance->system_info_h, GFP_KERNEL);
7193*4882a593Smuzhiyun instance->pd_info =
7194*4882a593Smuzhiyun dma_alloc_coherent(&pdev->dev,
7195*4882a593Smuzhiyun sizeof(struct MR_PD_INFO),
7196*4882a593Smuzhiyun &instance->pd_info_h, GFP_KERNEL);
7197*4882a593Smuzhiyun instance->tgt_prop =
7198*4882a593Smuzhiyun dma_alloc_coherent(&pdev->dev,
7199*4882a593Smuzhiyun sizeof(struct MR_TARGET_PROPERTIES),
7200*4882a593Smuzhiyun &instance->tgt_prop_h, GFP_KERNEL);
7201*4882a593Smuzhiyun instance->crash_dump_buf =
7202*4882a593Smuzhiyun dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7203*4882a593Smuzhiyun &instance->crash_dump_h, GFP_KERNEL);
7204*4882a593Smuzhiyun
7205*4882a593Smuzhiyun if (!instance->system_info_buf)
7206*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
7207*4882a593Smuzhiyun "Failed to allocate system info buffer\n");
7208*4882a593Smuzhiyun
7209*4882a593Smuzhiyun if (!instance->pd_info)
7210*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
7211*4882a593Smuzhiyun "Failed to allocate pd_info buffer\n");
7212*4882a593Smuzhiyun
7213*4882a593Smuzhiyun if (!instance->tgt_prop)
7214*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
7215*4882a593Smuzhiyun "Failed to allocate tgt_prop buffer\n");
7216*4882a593Smuzhiyun
7217*4882a593Smuzhiyun if (!instance->crash_dump_buf)
7218*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
7219*4882a593Smuzhiyun "Failed to allocate crash dump buffer\n");
7220*4882a593Smuzhiyun }
7221*4882a593Smuzhiyun
7222*4882a593Smuzhiyun return 0;
7223*4882a593Smuzhiyun }
7224*4882a593Smuzhiyun
7225*4882a593Smuzhiyun /*
7226*4882a593Smuzhiyun * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated
7227*4882a593Smuzhiyun * during driver load time
7228*4882a593Smuzhiyun *
7229*4882a593Smuzhiyun * @instance- Adapter soft instance
7230*4882a593Smuzhiyun *
7231*4882a593Smuzhiyun */
7232*4882a593Smuzhiyun static inline
7233*4882a593Smuzhiyun void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
7234*4882a593Smuzhiyun {
7235*4882a593Smuzhiyun struct pci_dev *pdev = instance->pdev;
7236*4882a593Smuzhiyun struct fusion_context *fusion = instance->ctrl_context;
7237*4882a593Smuzhiyun
7238*4882a593Smuzhiyun if (instance->evt_detail)
7239*4882a593Smuzhiyun dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
7240*4882a593Smuzhiyun instance->evt_detail,
7241*4882a593Smuzhiyun instance->evt_detail_h);
7242*4882a593Smuzhiyun
7243*4882a593Smuzhiyun if (fusion && fusion->ioc_init_request)
7244*4882a593Smuzhiyun dma_free_coherent(&pdev->dev,
7245*4882a593Smuzhiyun sizeof(struct MPI2_IOC_INIT_REQUEST),
7246*4882a593Smuzhiyun fusion->ioc_init_request,
7247*4882a593Smuzhiyun fusion->ioc_init_request_phys);
7248*4882a593Smuzhiyun
7249*4882a593Smuzhiyun if (instance->pd_list_buf)
7250*4882a593Smuzhiyun dma_free_coherent(&pdev->dev,
7251*4882a593Smuzhiyun MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7252*4882a593Smuzhiyun instance->pd_list_buf,
7253*4882a593Smuzhiyun instance->pd_list_buf_h);
7254*4882a593Smuzhiyun
7255*4882a593Smuzhiyun if (instance->ld_list_buf)
7256*4882a593Smuzhiyun dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
7257*4882a593Smuzhiyun instance->ld_list_buf,
7258*4882a593Smuzhiyun instance->ld_list_buf_h);
7259*4882a593Smuzhiyun
7260*4882a593Smuzhiyun if (instance->ld_targetid_list_buf)
7261*4882a593Smuzhiyun dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
7262*4882a593Smuzhiyun instance->ld_targetid_list_buf,
7263*4882a593Smuzhiyun instance->ld_targetid_list_buf_h);
7264*4882a593Smuzhiyun
7265*4882a593Smuzhiyun if (instance->ctrl_info_buf)
7266*4882a593Smuzhiyun dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
7267*4882a593Smuzhiyun instance->ctrl_info_buf,
7268*4882a593Smuzhiyun instance->ctrl_info_buf_h);
7269*4882a593Smuzhiyun
7270*4882a593Smuzhiyun if (instance->system_info_buf)
7271*4882a593Smuzhiyun dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
7272*4882a593Smuzhiyun instance->system_info_buf,
7273*4882a593Smuzhiyun instance->system_info_h);
7274*4882a593Smuzhiyun
7275*4882a593Smuzhiyun if (instance->pd_info)
7276*4882a593Smuzhiyun dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
7277*4882a593Smuzhiyun instance->pd_info, instance->pd_info_h);
7278*4882a593Smuzhiyun
7279*4882a593Smuzhiyun if (instance->tgt_prop)
7280*4882a593Smuzhiyun dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
7281*4882a593Smuzhiyun instance->tgt_prop, instance->tgt_prop_h);
7282*4882a593Smuzhiyun
7283*4882a593Smuzhiyun if (instance->crash_dump_buf)
7284*4882a593Smuzhiyun dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7285*4882a593Smuzhiyun instance->crash_dump_buf,
7286*4882a593Smuzhiyun instance->crash_dump_h);
7287*4882a593Smuzhiyun
7288*4882a593Smuzhiyun if (instance->snapdump_prop)
7289*4882a593Smuzhiyun dma_free_coherent(&pdev->dev,
7290*4882a593Smuzhiyun sizeof(struct MR_SNAPDUMP_PROPERTIES),
7291*4882a593Smuzhiyun instance->snapdump_prop,
7292*4882a593Smuzhiyun instance->snapdump_prop_h);
7293*4882a593Smuzhiyun
7294*4882a593Smuzhiyun if (instance->host_device_list_buf)
7295*4882a593Smuzhiyun dma_free_coherent(&pdev->dev,
7296*4882a593Smuzhiyun HOST_DEVICE_LIST_SZ,
7297*4882a593Smuzhiyun instance->host_device_list_buf,
7298*4882a593Smuzhiyun instance->host_device_list_buf_h);
7299*4882a593Smuzhiyun
7300*4882a593Smuzhiyun }
7301*4882a593Smuzhiyun
7302*4882a593Smuzhiyun /*
7303*4882a593Smuzhiyun * megasas_init_ctrl_params - Initialize controller's instance
7304*4882a593Smuzhiyun * parameters before FW init
7305*4882a593Smuzhiyun * @instance - Adapter soft instance
7306*4882a593Smuzhiyun * @return - void
7307*4882a593Smuzhiyun */
7308*4882a593Smuzhiyun static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
7309*4882a593Smuzhiyun {
7310*4882a593Smuzhiyun instance->fw_crash_state = UNAVAILABLE;
7311*4882a593Smuzhiyun
7312*4882a593Smuzhiyun megasas_poll_wait_aen = 0;
7313*4882a593Smuzhiyun instance->issuepend_done = 1;
7314*4882a593Smuzhiyun atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
7315*4882a593Smuzhiyun
7316*4882a593Smuzhiyun /*
7317*4882a593Smuzhiyun * Initialize locks and queues
7318*4882a593Smuzhiyun */
7319*4882a593Smuzhiyun INIT_LIST_HEAD(&instance->cmd_pool);
7320*4882a593Smuzhiyun INIT_LIST_HEAD(&instance->internal_reset_pending_q);
7321*4882a593Smuzhiyun
7322*4882a593Smuzhiyun atomic_set(&instance->fw_outstanding, 0);
7323*4882a593Smuzhiyun atomic64_set(&instance->total_io_count, 0);
7324*4882a593Smuzhiyun
7325*4882a593Smuzhiyun init_waitqueue_head(&instance->int_cmd_wait_q);
7326*4882a593Smuzhiyun init_waitqueue_head(&instance->abort_cmd_wait_q);
7327*4882a593Smuzhiyun
7328*4882a593Smuzhiyun spin_lock_init(&instance->crashdump_lock);
7329*4882a593Smuzhiyun spin_lock_init(&instance->mfi_pool_lock);
7330*4882a593Smuzhiyun spin_lock_init(&instance->hba_lock);
7331*4882a593Smuzhiyun spin_lock_init(&instance->stream_lock);
7332*4882a593Smuzhiyun spin_lock_init(&instance->completion_lock);
7333*4882a593Smuzhiyun
7334*4882a593Smuzhiyun mutex_init(&instance->reset_mutex);
7335*4882a593Smuzhiyun
7336*4882a593Smuzhiyun if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
7337*4882a593Smuzhiyun (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
7338*4882a593Smuzhiyun instance->flag_ieee = 1;
7339*4882a593Smuzhiyun
7340*4882a593Smuzhiyun megasas_dbg_lvl = 0;
7341*4882a593Smuzhiyun instance->flag = 0;
7342*4882a593Smuzhiyun instance->unload = 1;
7343*4882a593Smuzhiyun instance->last_time = 0;
7344*4882a593Smuzhiyun instance->disableOnlineCtrlReset = 1;
7345*4882a593Smuzhiyun instance->UnevenSpanSupport = 0;
7346*4882a593Smuzhiyun instance->smp_affinity_enable = smp_affinity_enable ? true : false;
7347*4882a593Smuzhiyun instance->msix_load_balance = false;
7348*4882a593Smuzhiyun
7349*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES)
7350*4882a593Smuzhiyun INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
7351*4882a593Smuzhiyun else
7352*4882a593Smuzhiyun INIT_WORK(&instance->work_init, process_fw_state_change_wq);
7353*4882a593Smuzhiyun }
7354*4882a593Smuzhiyun
7355*4882a593Smuzhiyun /**
7356*4882a593Smuzhiyun * megasas_probe_one - PCI hotplug entry point
7357*4882a593Smuzhiyun * @pdev: PCI device structure
7358*4882a593Smuzhiyun * @id: PCI ids of supported hotplugged adapter
7359*4882a593Smuzhiyun */
7360*4882a593Smuzhiyun static int megasas_probe_one(struct pci_dev *pdev,
7361*4882a593Smuzhiyun const struct pci_device_id *id)
7362*4882a593Smuzhiyun {
7363*4882a593Smuzhiyun int rval, pos;
7364*4882a593Smuzhiyun struct Scsi_Host *host;
7365*4882a593Smuzhiyun struct megasas_instance *instance;
7366*4882a593Smuzhiyun u16 control = 0;
7367*4882a593Smuzhiyun
7368*4882a593Smuzhiyun switch (pdev->device) {
7369*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_AERO_10E0:
7370*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_AERO_10E3:
7371*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_AERO_10E4:
7372*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_AERO_10E7:
7373*4882a593Smuzhiyun dev_err(&pdev->dev, "Adapter is in non secure mode\n");
7374*4882a593Smuzhiyun return 1;
7375*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_AERO_10E1:
7376*4882a593Smuzhiyun case PCI_DEVICE_ID_LSI_AERO_10E5:
7377*4882a593Smuzhiyun dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
7378*4882a593Smuzhiyun break;
7379*4882a593Smuzhiyun }
7380*4882a593Smuzhiyun
7381*4882a593Smuzhiyun /* Reset MSI-X in the kdump kernel */
7382*4882a593Smuzhiyun if (reset_devices) {
7383*4882a593Smuzhiyun pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
7384*4882a593Smuzhiyun if (pos) {
7385*4882a593Smuzhiyun pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
7386*4882a593Smuzhiyun &control);
7387*4882a593Smuzhiyun if (control & PCI_MSIX_FLAGS_ENABLE) {
7388*4882a593Smuzhiyun dev_info(&pdev->dev, "resetting MSI-X\n");
7389*4882a593Smuzhiyun pci_write_config_word(pdev,
7390*4882a593Smuzhiyun pos + PCI_MSIX_FLAGS,
7391*4882a593Smuzhiyun control &
7392*4882a593Smuzhiyun ~PCI_MSIX_FLAGS_ENABLE);
7393*4882a593Smuzhiyun }
7394*4882a593Smuzhiyun }
7395*4882a593Smuzhiyun }
7396*4882a593Smuzhiyun
7397*4882a593Smuzhiyun /*
7398*4882a593Smuzhiyun * PCI prepping: enable device, set bus mastering and DMA mask
7399*4882a593Smuzhiyun */
7400*4882a593Smuzhiyun rval = pci_enable_device_mem(pdev);
7401*4882a593Smuzhiyun
7402*4882a593Smuzhiyun if (rval) {
7403*4882a593Smuzhiyun return rval;
7404*4882a593Smuzhiyun }
7405*4882a593Smuzhiyun
7406*4882a593Smuzhiyun pci_set_master(pdev);
7407*4882a593Smuzhiyun
7408*4882a593Smuzhiyun host = scsi_host_alloc(&megasas_template,
7409*4882a593Smuzhiyun sizeof(struct megasas_instance));
7410*4882a593Smuzhiyun
7411*4882a593Smuzhiyun if (!host) {
7412*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
7413*4882a593Smuzhiyun goto fail_alloc_instance;
7414*4882a593Smuzhiyun }
7415*4882a593Smuzhiyun
7416*4882a593Smuzhiyun instance = (struct megasas_instance *)host->hostdata;
7417*4882a593Smuzhiyun memset(instance, 0, sizeof(*instance));
7418*4882a593Smuzhiyun atomic_set(&instance->fw_reset_no_pci_access, 0);
7419*4882a593Smuzhiyun
7420*4882a593Smuzhiyun /*
7421*4882a593Smuzhiyun * Initialize PCI related and misc parameters
7422*4882a593Smuzhiyun */
7423*4882a593Smuzhiyun instance->pdev = pdev;
7424*4882a593Smuzhiyun instance->host = host;
7425*4882a593Smuzhiyun instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
7426*4882a593Smuzhiyun instance->init_id = MEGASAS_DEFAULT_INIT_ID;
7427*4882a593Smuzhiyun
7428*4882a593Smuzhiyun megasas_set_adapter_type(instance);
7429*4882a593Smuzhiyun
7430*4882a593Smuzhiyun /*
7431*4882a593Smuzhiyun * Initialize MFI Firmware
7432*4882a593Smuzhiyun */
7433*4882a593Smuzhiyun if (megasas_init_fw(instance))
7434*4882a593Smuzhiyun goto fail_init_mfi;
7435*4882a593Smuzhiyun
7436*4882a593Smuzhiyun if (instance->requestorId) {
7437*4882a593Smuzhiyun if (instance->PlasmaFW111) {
7438*4882a593Smuzhiyun instance->vf_affiliation_111 =
7439*4882a593Smuzhiyun dma_alloc_coherent(&pdev->dev,
7440*4882a593Smuzhiyun sizeof(struct MR_LD_VF_AFFILIATION_111),
7441*4882a593Smuzhiyun &instance->vf_affiliation_111_h,
7442*4882a593Smuzhiyun GFP_KERNEL);
7443*4882a593Smuzhiyun if (!instance->vf_affiliation_111)
7444*4882a593Smuzhiyun dev_warn(&pdev->dev, "Can't allocate "
7445*4882a593Smuzhiyun "memory for VF affiliation buffer\n");
7446*4882a593Smuzhiyun } else {
7447*4882a593Smuzhiyun instance->vf_affiliation =
7448*4882a593Smuzhiyun dma_alloc_coherent(&pdev->dev,
7449*4882a593Smuzhiyun (MAX_LOGICAL_DRIVES + 1) *
7450*4882a593Smuzhiyun sizeof(struct MR_LD_VF_AFFILIATION),
7451*4882a593Smuzhiyun &instance->vf_affiliation_h,
7452*4882a593Smuzhiyun GFP_KERNEL);
7453*4882a593Smuzhiyun if (!instance->vf_affiliation)
7454*4882a593Smuzhiyun dev_warn(&pdev->dev, "Can't allocate "
7455*4882a593Smuzhiyun "memory for VF affiliation buffer\n");
7456*4882a593Smuzhiyun }
7457*4882a593Smuzhiyun }
7458*4882a593Smuzhiyun
7459*4882a593Smuzhiyun /*
7460*4882a593Smuzhiyun * Store instance in PCI softstate
7461*4882a593Smuzhiyun */
7462*4882a593Smuzhiyun pci_set_drvdata(pdev, instance);
7463*4882a593Smuzhiyun
7464*4882a593Smuzhiyun /*
7465*4882a593Smuzhiyun * Add this controller to megasas_mgmt_info structure so that it
7466*4882a593Smuzhiyun * can be exported to management applications
7467*4882a593Smuzhiyun */
7468*4882a593Smuzhiyun megasas_mgmt_info.count++;
7469*4882a593Smuzhiyun megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
7470*4882a593Smuzhiyun megasas_mgmt_info.max_index++;
7471*4882a593Smuzhiyun
7472*4882a593Smuzhiyun /*
7473*4882a593Smuzhiyun * Register with SCSI mid-layer
7474*4882a593Smuzhiyun */
7475*4882a593Smuzhiyun if (megasas_io_attach(instance))
7476*4882a593Smuzhiyun goto fail_io_attach;
7477*4882a593Smuzhiyun
7478*4882a593Smuzhiyun instance->unload = 0;
7479*4882a593Smuzhiyun /*
7480*4882a593Smuzhiyun * Trigger SCSI to scan our drives
7481*4882a593Smuzhiyun */
7482*4882a593Smuzhiyun if (!instance->enable_fw_dev_list ||
7483*4882a593Smuzhiyun (instance->host_device_list_buf->count > 0))
7484*4882a593Smuzhiyun scsi_scan_host(host);
7485*4882a593Smuzhiyun
7486*4882a593Smuzhiyun /*
7487*4882a593Smuzhiyun * Initiate AEN (Asynchronous Event Notification)
7488*4882a593Smuzhiyun */
7489*4882a593Smuzhiyun if (megasas_start_aen(instance)) {
7490*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
7491*4882a593Smuzhiyun goto fail_start_aen;
7492*4882a593Smuzhiyun }
7493*4882a593Smuzhiyun
7494*4882a593Smuzhiyun megasas_setup_debugfs(instance);
7495*4882a593Smuzhiyun
7496*4882a593Smuzhiyun /* Get current SR-IOV LD/VF affiliation */
7497*4882a593Smuzhiyun if (instance->requestorId)
7498*4882a593Smuzhiyun megasas_get_ld_vf_affiliation(instance, 1);
7499*4882a593Smuzhiyun
7500*4882a593Smuzhiyun return 0;
7501*4882a593Smuzhiyun
7502*4882a593Smuzhiyun fail_start_aen:
7503*4882a593Smuzhiyun instance->unload = 1;
7504*4882a593Smuzhiyun scsi_remove_host(instance->host);
7505*4882a593Smuzhiyun fail_io_attach:
7506*4882a593Smuzhiyun megasas_mgmt_info.count--;
7507*4882a593Smuzhiyun megasas_mgmt_info.max_index--;
7508*4882a593Smuzhiyun megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
7509*4882a593Smuzhiyun
7510*4882a593Smuzhiyun if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7511*4882a593Smuzhiyun del_timer_sync(&instance->sriov_heartbeat_timer);
7512*4882a593Smuzhiyun
7513*4882a593Smuzhiyun instance->instancet->disable_intr(instance);
7514*4882a593Smuzhiyun megasas_destroy_irqs(instance);
7515*4882a593Smuzhiyun
7516*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES)
7517*4882a593Smuzhiyun megasas_release_fusion(instance);
7518*4882a593Smuzhiyun else
7519*4882a593Smuzhiyun megasas_release_mfi(instance);
7520*4882a593Smuzhiyun
7521*4882a593Smuzhiyun if (instance->msix_vectors)
7522*4882a593Smuzhiyun pci_free_irq_vectors(instance->pdev);
7523*4882a593Smuzhiyun instance->msix_vectors = 0;
7524*4882a593Smuzhiyun
7525*4882a593Smuzhiyun if (instance->fw_crash_state != UNAVAILABLE)
7526*4882a593Smuzhiyun megasas_free_host_crash_buffer(instance);
7527*4882a593Smuzhiyun
7528*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES)
7529*4882a593Smuzhiyun megasas_fusion_stop_watchdog(instance);
7530*4882a593Smuzhiyun fail_init_mfi:
7531*4882a593Smuzhiyun scsi_host_put(host);
7532*4882a593Smuzhiyun fail_alloc_instance:
7533*4882a593Smuzhiyun pci_disable_device(pdev);
7534*4882a593Smuzhiyun
7535*4882a593Smuzhiyun return -ENODEV;
7536*4882a593Smuzhiyun }
7537*4882a593Smuzhiyun
7538*4882a593Smuzhiyun /**
7539*4882a593Smuzhiyun * megasas_flush_cache - Requests FW to flush all its caches
7540*4882a593Smuzhiyun * @instance: Adapter soft state
7541*4882a593Smuzhiyun */
7542*4882a593Smuzhiyun static void megasas_flush_cache(struct megasas_instance *instance)
7543*4882a593Smuzhiyun {
7544*4882a593Smuzhiyun struct megasas_cmd *cmd;
7545*4882a593Smuzhiyun struct megasas_dcmd_frame *dcmd;
7546*4882a593Smuzhiyun
7547*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7548*4882a593Smuzhiyun return;
7549*4882a593Smuzhiyun
7550*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
7551*4882a593Smuzhiyun
7552*4882a593Smuzhiyun if (!cmd)
7553*4882a593Smuzhiyun return;
7554*4882a593Smuzhiyun
7555*4882a593Smuzhiyun dcmd = &cmd->frame->dcmd;
7556*4882a593Smuzhiyun
7557*4882a593Smuzhiyun memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7558*4882a593Smuzhiyun
7559*4882a593Smuzhiyun dcmd->cmd = MFI_CMD_DCMD;
7560*4882a593Smuzhiyun dcmd->cmd_status = 0x0;
7561*4882a593Smuzhiyun dcmd->sge_count = 0;
7562*4882a593Smuzhiyun dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7563*4882a593Smuzhiyun dcmd->timeout = 0;
7564*4882a593Smuzhiyun dcmd->pad_0 = 0;
7565*4882a593Smuzhiyun dcmd->data_xfer_len = 0;
7566*4882a593Smuzhiyun dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
7567*4882a593Smuzhiyun dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
7568*4882a593Smuzhiyun
7569*4882a593Smuzhiyun if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7570*4882a593Smuzhiyun != DCMD_SUCCESS) {
7571*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
7572*4882a593Smuzhiyun "return from %s %d\n", __func__, __LINE__);
7573*4882a593Smuzhiyun return;
7574*4882a593Smuzhiyun }
7575*4882a593Smuzhiyun
7576*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
7577*4882a593Smuzhiyun }
7578*4882a593Smuzhiyun
7579*4882a593Smuzhiyun /**
7580*4882a593Smuzhiyun * megasas_shutdown_controller - Instructs FW to shutdown the controller
7581*4882a593Smuzhiyun * @instance: Adapter soft state
7582*4882a593Smuzhiyun * @opcode: Shutdown/Hibernate
7583*4882a593Smuzhiyun */
7584*4882a593Smuzhiyun static void megasas_shutdown_controller(struct megasas_instance *instance,
7585*4882a593Smuzhiyun u32 opcode)
7586*4882a593Smuzhiyun {
7587*4882a593Smuzhiyun struct megasas_cmd *cmd;
7588*4882a593Smuzhiyun struct megasas_dcmd_frame *dcmd;
7589*4882a593Smuzhiyun
7590*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7591*4882a593Smuzhiyun return;
7592*4882a593Smuzhiyun
7593*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
7594*4882a593Smuzhiyun
7595*4882a593Smuzhiyun if (!cmd)
7596*4882a593Smuzhiyun return;
7597*4882a593Smuzhiyun
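/*
 * Abort any internally issued DCMDs that may still be outstanding
 * (AEN wait, RAID map update, JBOD sequence sync) so the firmware has
 * nothing in flight when the shutdown/hibernate DCMD arrives.
 */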
7598*4882a593Smuzhiyun if (instance->aen_cmd)
7599*4882a593Smuzhiyun megasas_issue_blocked_abort_cmd(instance,
7600*4882a593Smuzhiyun instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
7601*4882a593Smuzhiyun if (instance->map_update_cmd)
7602*4882a593Smuzhiyun megasas_issue_blocked_abort_cmd(instance,
7603*4882a593Smuzhiyun instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
7604*4882a593Smuzhiyun if (instance->jbod_seq_cmd)
7605*4882a593Smuzhiyun megasas_issue_blocked_abort_cmd(instance,
7606*4882a593Smuzhiyun instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
7607*4882a593Smuzhiyun
7608*4882a593Smuzhiyun dcmd = &cmd->frame->dcmd;
7609*4882a593Smuzhiyun
7610*4882a593Smuzhiyun memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7611*4882a593Smuzhiyun
7612*4882a593Smuzhiyun dcmd->cmd = MFI_CMD_DCMD;
7613*4882a593Smuzhiyun dcmd->cmd_status = 0x0;
7614*4882a593Smuzhiyun dcmd->sge_count = 0;
7615*4882a593Smuzhiyun dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7616*4882a593Smuzhiyun dcmd->timeout = 0;
7617*4882a593Smuzhiyun dcmd->pad_0 = 0;
7618*4882a593Smuzhiyun dcmd->data_xfer_len = 0;
7619*4882a593Smuzhiyun dcmd->opcode = cpu_to_le32(opcode);
7620*4882a593Smuzhiyun
7621*4882a593Smuzhiyun if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7622*4882a593Smuzhiyun != DCMD_SUCCESS) {
7623*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
7624*4882a593Smuzhiyun "return from %s %d\n", __func__, __LINE__);
7625*4882a593Smuzhiyun return;
7626*4882a593Smuzhiyun }
7627*4882a593Smuzhiyun
7628*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
7629*4882a593Smuzhiyun }
7630*4882a593Smuzhiyun
7631*4882a593Smuzhiyun #ifdef CONFIG_PM
7632*4882a593Smuzhiyun /**
7633*4882a593Smuzhiyun * megasas_suspend - driver suspend entry point
7634*4882a593Smuzhiyun * @pdev: PCI device structure
7635*4882a593Smuzhiyun * @state: PCI power state to suspend routine
7636*4882a593Smuzhiyun */
7637*4882a593Smuzhiyun static int
7638*4882a593Smuzhiyun megasas_suspend(struct pci_dev *pdev, pm_message_t state)
7639*4882a593Smuzhiyun {
7640*4882a593Smuzhiyun struct megasas_instance *instance;
7641*4882a593Smuzhiyun
7642*4882a593Smuzhiyun instance = pci_get_drvdata(pdev);
7643*4882a593Smuzhiyun
7644*4882a593Smuzhiyun if (!instance)
7645*4882a593Smuzhiyun return 0;
7646*4882a593Smuzhiyun
7647*4882a593Smuzhiyun instance->unload = 1;
7648*4882a593Smuzhiyun
7649*4882a593Smuzhiyun dev_info(&pdev->dev, "%s is called\n", __func__);
7650*4882a593Smuzhiyun
7651*4882a593Smuzhiyun /* Shutdown SR-IOV heartbeat timer */
7652*4882a593Smuzhiyun if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7653*4882a593Smuzhiyun del_timer_sync(&instance->sriov_heartbeat_timer);
7654*4882a593Smuzhiyun
7655*4882a593Smuzhiyun /* Stop the FW fault detection watchdog */
7656*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES)
7657*4882a593Smuzhiyun megasas_fusion_stop_watchdog(instance);
7658*4882a593Smuzhiyun
7659*4882a593Smuzhiyun megasas_flush_cache(instance);
7660*4882a593Smuzhiyun megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
7661*4882a593Smuzhiyun
7662*4882a593Smuzhiyun /* cancel the delayed work if this work still in queue */
7663*4882a593Smuzhiyun if (instance->ev != NULL) {
7664*4882a593Smuzhiyun struct megasas_aen_event *ev = instance->ev;
7665*4882a593Smuzhiyun cancel_delayed_work_sync(&ev->hotplug_work);
7666*4882a593Smuzhiyun instance->ev = NULL;
7667*4882a593Smuzhiyun }
7668*4882a593Smuzhiyun
7669*4882a593Smuzhiyun tasklet_kill(&instance->isr_tasklet);
7670*4882a593Smuzhiyun
7671*4882a593Smuzhiyun pci_set_drvdata(instance->pdev, instance);
7672*4882a593Smuzhiyun instance->instancet->disable_intr(instance);
7673*4882a593Smuzhiyun
7674*4882a593Smuzhiyun megasas_destroy_irqs(instance);
7675*4882a593Smuzhiyun
7676*4882a593Smuzhiyun if (instance->msix_vectors)
7677*4882a593Smuzhiyun pci_free_irq_vectors(instance->pdev);
7678*4882a593Smuzhiyun
7679*4882a593Smuzhiyun pci_save_state(pdev);
7680*4882a593Smuzhiyun pci_disable_device(pdev);
7681*4882a593Smuzhiyun
7682*4882a593Smuzhiyun pci_set_power_state(pdev, pci_choose_state(pdev, state));
7683*4882a593Smuzhiyun
7684*4882a593Smuzhiyun return 0;
7685*4882a593Smuzhiyun }
7686*4882a593Smuzhiyun
7687*4882a593Smuzhiyun /**
7688*4882a593Smuzhiyun * megasas_resume- driver resume entry point
7689*4882a593Smuzhiyun * @pdev: PCI device structure
7690*4882a593Smuzhiyun */
7691*4882a593Smuzhiyun static int
7692*4882a593Smuzhiyun megasas_resume(struct pci_dev *pdev)
7693*4882a593Smuzhiyun {
7694*4882a593Smuzhiyun int rval;
7695*4882a593Smuzhiyun struct Scsi_Host *host;
7696*4882a593Smuzhiyun struct megasas_instance *instance;
7697*4882a593Smuzhiyun u32 status_reg;
7698*4882a593Smuzhiyun
7699*4882a593Smuzhiyun instance = pci_get_drvdata(pdev);
7700*4882a593Smuzhiyun
7701*4882a593Smuzhiyun if (!instance)
7702*4882a593Smuzhiyun return 0;
7703*4882a593Smuzhiyun
7704*4882a593Smuzhiyun host = instance->host;
7705*4882a593Smuzhiyun pci_set_power_state(pdev, PCI_D0);
7706*4882a593Smuzhiyun pci_enable_wake(pdev, PCI_D0, 0);
7707*4882a593Smuzhiyun pci_restore_state(pdev);
7708*4882a593Smuzhiyun
7709*4882a593Smuzhiyun dev_info(&pdev->dev, "%s is called\n", __func__);
7710*4882a593Smuzhiyun /*
7711*4882a593Smuzhiyun * PCI prepping: enable device, set bus mastering and DMA mask
7712*4882a593Smuzhiyun */
7713*4882a593Smuzhiyun rval = pci_enable_device_mem(pdev);
7714*4882a593Smuzhiyun
7715*4882a593Smuzhiyun if (rval) {
7716*4882a593Smuzhiyun dev_err(&pdev->dev, "Enable device failed\n");
7717*4882a593Smuzhiyun return rval;
7718*4882a593Smuzhiyun }
7719*4882a593Smuzhiyun
7720*4882a593Smuzhiyun pci_set_master(pdev);
7721*4882a593Smuzhiyun
7722*4882a593Smuzhiyun /*
7723*4882a593Smuzhiyun * We expect the FW state to be READY
7724*4882a593Smuzhiyun */
7725*4882a593Smuzhiyun
7726*4882a593Smuzhiyun if (megasas_transition_to_ready(instance, 0)) {
7727*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
7728*4882a593Smuzhiyun "Failed to transition controller to ready from %s!\n",
7729*4882a593Smuzhiyun __func__);
7730*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES) {
7731*4882a593Smuzhiyun status_reg =
7732*4882a593Smuzhiyun instance->instancet->read_fw_status_reg(instance);
7733*4882a593Smuzhiyun if (!(status_reg & MFI_RESET_ADAPTER) ||
7734*4882a593Smuzhiyun ((megasas_adp_reset_wait_for_ready
7735*4882a593Smuzhiyun (instance, true, 0)) == FAILED))
7736*4882a593Smuzhiyun goto fail_ready_state;
7737*4882a593Smuzhiyun } else {
7738*4882a593Smuzhiyun atomic_set(&instance->fw_reset_no_pci_access, 1);
7739*4882a593Smuzhiyun instance->instancet->adp_reset
7740*4882a593Smuzhiyun (instance, instance->reg_set);
7741*4882a593Smuzhiyun atomic_set(&instance->fw_reset_no_pci_access, 0);
7742*4882a593Smuzhiyun
7743*4882a593Smuzhiyun /* waiting for about 30 seconds before retry */
7744*4882a593Smuzhiyun ssleep(30);
7745*4882a593Smuzhiyun
7746*4882a593Smuzhiyun if (megasas_transition_to_ready(instance, 0))
7747*4882a593Smuzhiyun goto fail_ready_state;
7748*4882a593Smuzhiyun }
7749*4882a593Smuzhiyun
7750*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
7751*4882a593Smuzhiyun "FW restarted successfully from %s!\n",
7752*4882a593Smuzhiyun __func__);
7753*4882a593Smuzhiyun }
7754*4882a593Smuzhiyun if (megasas_set_dma_mask(instance))
7755*4882a593Smuzhiyun goto fail_set_dma_mask;
7756*4882a593Smuzhiyun
7757*4882a593Smuzhiyun /*
7758*4882a593Smuzhiyun * Initialize MFI Firmware
7759*4882a593Smuzhiyun */
7760*4882a593Smuzhiyun
7761*4882a593Smuzhiyun atomic_set(&instance->fw_outstanding, 0);
7762*4882a593Smuzhiyun atomic_set(&instance->ldio_outstanding, 0);
7763*4882a593Smuzhiyun
7764*4882a593Smuzhiyun /* Now re-enable MSI-X */
7765*4882a593Smuzhiyun if (instance->msix_vectors)
7766*4882a593Smuzhiyun megasas_alloc_irq_vectors(instance);
7767*4882a593Smuzhiyun
7768*4882a593Smuzhiyun if (!instance->msix_vectors) {
7769*4882a593Smuzhiyun rval = pci_alloc_irq_vectors(instance->pdev, 1, 1,
7770*4882a593Smuzhiyun PCI_IRQ_LEGACY);
7771*4882a593Smuzhiyun if (rval < 0)
7772*4882a593Smuzhiyun goto fail_reenable_msix;
7773*4882a593Smuzhiyun }
7774*4882a593Smuzhiyun
7775*4882a593Smuzhiyun megasas_setup_reply_map(instance);
7776*4882a593Smuzhiyun
7777*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES) {
7778*4882a593Smuzhiyun megasas_reset_reply_desc(instance);
7779*4882a593Smuzhiyun if (megasas_ioc_init_fusion(instance)) {
7780*4882a593Smuzhiyun megasas_free_cmds(instance);
7781*4882a593Smuzhiyun megasas_free_cmds_fusion(instance);
7782*4882a593Smuzhiyun goto fail_init_mfi;
7783*4882a593Smuzhiyun }
7784*4882a593Smuzhiyun if (!megasas_get_map_info(instance))
7785*4882a593Smuzhiyun megasas_sync_map_info(instance);
7786*4882a593Smuzhiyun } else {
7787*4882a593Smuzhiyun *instance->producer = 0;
7788*4882a593Smuzhiyun *instance->consumer = 0;
7789*4882a593Smuzhiyun if (megasas_issue_init_mfi(instance))
7790*4882a593Smuzhiyun goto fail_init_mfi;
7791*4882a593Smuzhiyun }
7792*4882a593Smuzhiyun
7793*4882a593Smuzhiyun if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
7794*4882a593Smuzhiyun goto fail_init_mfi;
7795*4882a593Smuzhiyun
7796*4882a593Smuzhiyun tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
7797*4882a593Smuzhiyun (unsigned long)instance);
7798*4882a593Smuzhiyun
7799*4882a593Smuzhiyun if (instance->msix_vectors ?
7800*4882a593Smuzhiyun megasas_setup_irqs_msix(instance, 0) :
7801*4882a593Smuzhiyun megasas_setup_irqs_ioapic(instance))
7802*4882a593Smuzhiyun goto fail_init_mfi;
7803*4882a593Smuzhiyun
7804*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES)
7805*4882a593Smuzhiyun megasas_setup_irq_poll(instance);
7806*4882a593Smuzhiyun
7807*4882a593Smuzhiyun /* Re-launch SR-IOV heartbeat timer */
7808*4882a593Smuzhiyun if (instance->requestorId) {
7809*4882a593Smuzhiyun if (!megasas_sriov_start_heartbeat(instance, 0))
7810*4882a593Smuzhiyun megasas_start_timer(instance);
7811*4882a593Smuzhiyun else {
7812*4882a593Smuzhiyun instance->skip_heartbeat_timer_del = 1;
7813*4882a593Smuzhiyun goto fail_init_mfi;
7814*4882a593Smuzhiyun }
7815*4882a593Smuzhiyun }
7816*4882a593Smuzhiyun
7817*4882a593Smuzhiyun instance->instancet->enable_intr(instance);
7818*4882a593Smuzhiyun megasas_setup_jbod_map(instance);
7819*4882a593Smuzhiyun instance->unload = 0;
7820*4882a593Smuzhiyun
7821*4882a593Smuzhiyun /*
7822*4882a593Smuzhiyun * Initiate AEN (Asynchronous Event Notification)
7823*4882a593Smuzhiyun */
7824*4882a593Smuzhiyun if (megasas_start_aen(instance))
7825*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "Start AEN failed\n");
7826*4882a593Smuzhiyun
7827*4882a593Smuzhiyun /* Re-launch FW fault watchdog */
7828*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES)
7829*4882a593Smuzhiyun if (megasas_fusion_start_watchdog(instance) != SUCCESS)
7830*4882a593Smuzhiyun goto fail_start_watchdog;
7831*4882a593Smuzhiyun
7832*4882a593Smuzhiyun return 0;
7833*4882a593Smuzhiyun
7834*4882a593Smuzhiyun fail_start_watchdog:
7835*4882a593Smuzhiyun if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7836*4882a593Smuzhiyun del_timer_sync(&instance->sriov_heartbeat_timer);
7837*4882a593Smuzhiyun fail_init_mfi:
7838*4882a593Smuzhiyun megasas_free_ctrl_dma_buffers(instance);
7839*4882a593Smuzhiyun megasas_free_ctrl_mem(instance);
7840*4882a593Smuzhiyun scsi_host_put(host);
7841*4882a593Smuzhiyun
7842*4882a593Smuzhiyun fail_reenable_msix:
7843*4882a593Smuzhiyun fail_set_dma_mask:
7844*4882a593Smuzhiyun fail_ready_state:
7845*4882a593Smuzhiyun
7846*4882a593Smuzhiyun pci_disable_device(pdev);
7847*4882a593Smuzhiyun
7848*4882a593Smuzhiyun return -ENODEV;
7849*4882a593Smuzhiyun }
7850*4882a593Smuzhiyun #else
7851*4882a593Smuzhiyun #define megasas_suspend NULL
7852*4882a593Smuzhiyun #define megasas_resume NULL
7853*4882a593Smuzhiyun #endif
7854*4882a593Smuzhiyun
7855*4882a593Smuzhiyun static inline int
7856*4882a593Smuzhiyun megasas_wait_for_adapter_operational(struct megasas_instance *instance)
7857*4882a593Smuzhiyun {
7858*4882a593Smuzhiyun int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
7859*4882a593Smuzhiyun int i;
7860*4882a593Smuzhiyun u8 adp_state;
7861*4882a593Smuzhiyun
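/*
 * Poll the recovery state once a second for up to
 * MEGASAS_RESET_WAIT_TIME * 2 seconds, printing a notice every
 * MEGASAS_RESET_NOTICE_INTERVAL seconds while a reset is still in
 * progress.
 */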
7862*4882a593Smuzhiyun for (i = 0; i < wait_time; i++) {
7863*4882a593Smuzhiyun adp_state = atomic_read(&instance->adprecovery);
7864*4882a593Smuzhiyun if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
7865*4882a593Smuzhiyun (adp_state == MEGASAS_HW_CRITICAL_ERROR))
7866*4882a593Smuzhiyun break;
7867*4882a593Smuzhiyun
7868*4882a593Smuzhiyun if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
7869*4882a593Smuzhiyun dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
7870*4882a593Smuzhiyun
7871*4882a593Smuzhiyun msleep(1000);
7872*4882a593Smuzhiyun }
7873*4882a593Smuzhiyun
7874*4882a593Smuzhiyun if (adp_state != MEGASAS_HBA_OPERATIONAL) {
7875*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
7876*4882a593Smuzhiyun "%s HBA failed to become operational, adp_state %d\n",
7877*4882a593Smuzhiyun __func__, adp_state);
7878*4882a593Smuzhiyun return 1;
7879*4882a593Smuzhiyun }
7880*4882a593Smuzhiyun
7881*4882a593Smuzhiyun return 0;
7882*4882a593Smuzhiyun }
7883*4882a593Smuzhiyun
7884*4882a593Smuzhiyun /**
7885*4882a593Smuzhiyun * megasas_detach_one - PCI hot-unplug entry point
7886*4882a593Smuzhiyun * @pdev: PCI device structure
7887*4882a593Smuzhiyun */
7888*4882a593Smuzhiyun static void megasas_detach_one(struct pci_dev *pdev)
7889*4882a593Smuzhiyun {
7890*4882a593Smuzhiyun int i;
7891*4882a593Smuzhiyun struct Scsi_Host *host;
7892*4882a593Smuzhiyun struct megasas_instance *instance;
7893*4882a593Smuzhiyun struct fusion_context *fusion;
7894*4882a593Smuzhiyun u32 pd_seq_map_sz;
7895*4882a593Smuzhiyun
7896*4882a593Smuzhiyun instance = pci_get_drvdata(pdev);
7897*4882a593Smuzhiyun
7898*4882a593Smuzhiyun if (!instance)
7899*4882a593Smuzhiyun return;
7900*4882a593Smuzhiyun
7901*4882a593Smuzhiyun host = instance->host;
7902*4882a593Smuzhiyun fusion = instance->ctrl_context;
7903*4882a593Smuzhiyun
7904*4882a593Smuzhiyun /* Shutdown SR-IOV heartbeat timer */
7905*4882a593Smuzhiyun if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7906*4882a593Smuzhiyun del_timer_sync(&instance->sriov_heartbeat_timer);
7907*4882a593Smuzhiyun
7908*4882a593Smuzhiyun /* Stop the FW fault detection watchdog */
7909*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES)
7910*4882a593Smuzhiyun megasas_fusion_stop_watchdog(instance);
7911*4882a593Smuzhiyun
7912*4882a593Smuzhiyun if (instance->fw_crash_state != UNAVAILABLE)
7913*4882a593Smuzhiyun megasas_free_host_crash_buffer(instance);
7914*4882a593Smuzhiyun scsi_remove_host(instance->host);
7915*4882a593Smuzhiyun instance->unload = 1;
7916*4882a593Smuzhiyun
7917*4882a593Smuzhiyun if (megasas_wait_for_adapter_operational(instance))
7918*4882a593Smuzhiyun goto skip_firing_dcmds;
7919*4882a593Smuzhiyun
7920*4882a593Smuzhiyun megasas_flush_cache(instance);
7921*4882a593Smuzhiyun megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7922*4882a593Smuzhiyun
7923*4882a593Smuzhiyun skip_firing_dcmds:
7924*4882a593Smuzhiyun 	/* cancel the delayed work if it is still queued */
7925*4882a593Smuzhiyun if (instance->ev != NULL) {
7926*4882a593Smuzhiyun struct megasas_aen_event *ev = instance->ev;
7927*4882a593Smuzhiyun cancel_delayed_work_sync(&ev->hotplug_work);
7928*4882a593Smuzhiyun instance->ev = NULL;
7929*4882a593Smuzhiyun }
7930*4882a593Smuzhiyun
7931*4882a593Smuzhiyun /* cancel all wait events */
7932*4882a593Smuzhiyun wake_up_all(&instance->int_cmd_wait_q);
7933*4882a593Smuzhiyun
7934*4882a593Smuzhiyun tasklet_kill(&instance->isr_tasklet);
7935*4882a593Smuzhiyun
7936*4882a593Smuzhiyun /*
7937*4882a593Smuzhiyun * Take the instance off the instance array. Note that we will not
7938*4882a593Smuzhiyun 	 * decrement the max_index. We let this array be a sparse array.
7939*4882a593Smuzhiyun */
7940*4882a593Smuzhiyun for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7941*4882a593Smuzhiyun if (megasas_mgmt_info.instance[i] == instance) {
7942*4882a593Smuzhiyun megasas_mgmt_info.count--;
7943*4882a593Smuzhiyun megasas_mgmt_info.instance[i] = NULL;
7944*4882a593Smuzhiyun
7945*4882a593Smuzhiyun break;
7946*4882a593Smuzhiyun }
7947*4882a593Smuzhiyun }
7948*4882a593Smuzhiyun
7949*4882a593Smuzhiyun instance->instancet->disable_intr(instance);
7950*4882a593Smuzhiyun
7951*4882a593Smuzhiyun megasas_destroy_irqs(instance);
7952*4882a593Smuzhiyun
7953*4882a593Smuzhiyun if (instance->msix_vectors)
7954*4882a593Smuzhiyun pci_free_irq_vectors(instance->pdev);
7955*4882a593Smuzhiyun
7956*4882a593Smuzhiyun if (instance->adapter_type >= VENTURA_SERIES) {
7957*4882a593Smuzhiyun for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
7958*4882a593Smuzhiyun kfree(fusion->stream_detect_by_ld[i]);
7959*4882a593Smuzhiyun kfree(fusion->stream_detect_by_ld);
7960*4882a593Smuzhiyun fusion->stream_detect_by_ld = NULL;
7961*4882a593Smuzhiyun }
7962*4882a593Smuzhiyun
7963*4882a593Smuzhiyun
7964*4882a593Smuzhiyun if (instance->adapter_type != MFI_SERIES) {
7965*4882a593Smuzhiyun megasas_release_fusion(instance);
7966*4882a593Smuzhiyun pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
7967*4882a593Smuzhiyun (sizeof(struct MR_PD_CFG_SEQ) *
7968*4882a593Smuzhiyun (MAX_PHYSICAL_DEVICES - 1));
7969*4882a593Smuzhiyun for (i = 0; i < 2 ; i++) {
7970*4882a593Smuzhiyun if (fusion->ld_map[i])
7971*4882a593Smuzhiyun dma_free_coherent(&instance->pdev->dev,
7972*4882a593Smuzhiyun fusion->max_map_sz,
7973*4882a593Smuzhiyun fusion->ld_map[i],
7974*4882a593Smuzhiyun fusion->ld_map_phys[i]);
7975*4882a593Smuzhiyun if (fusion->ld_drv_map[i]) {
7976*4882a593Smuzhiyun if (is_vmalloc_addr(fusion->ld_drv_map[i]))
7977*4882a593Smuzhiyun vfree(fusion->ld_drv_map[i]);
7978*4882a593Smuzhiyun else
7979*4882a593Smuzhiyun free_pages((ulong)fusion->ld_drv_map[i],
7980*4882a593Smuzhiyun fusion->drv_map_pages);
7981*4882a593Smuzhiyun }
7982*4882a593Smuzhiyun
7983*4882a593Smuzhiyun if (fusion->pd_seq_sync[i])
7984*4882a593Smuzhiyun dma_free_coherent(&instance->pdev->dev,
7985*4882a593Smuzhiyun pd_seq_map_sz,
7986*4882a593Smuzhiyun fusion->pd_seq_sync[i],
7987*4882a593Smuzhiyun fusion->pd_seq_phys[i]);
7988*4882a593Smuzhiyun }
7989*4882a593Smuzhiyun } else {
7990*4882a593Smuzhiyun megasas_release_mfi(instance);
7991*4882a593Smuzhiyun }
7992*4882a593Smuzhiyun
7993*4882a593Smuzhiyun if (instance->vf_affiliation)
7994*4882a593Smuzhiyun dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
7995*4882a593Smuzhiyun sizeof(struct MR_LD_VF_AFFILIATION),
7996*4882a593Smuzhiyun instance->vf_affiliation,
7997*4882a593Smuzhiyun instance->vf_affiliation_h);
7998*4882a593Smuzhiyun
7999*4882a593Smuzhiyun if (instance->vf_affiliation_111)
8000*4882a593Smuzhiyun dma_free_coherent(&pdev->dev,
8001*4882a593Smuzhiyun sizeof(struct MR_LD_VF_AFFILIATION_111),
8002*4882a593Smuzhiyun instance->vf_affiliation_111,
8003*4882a593Smuzhiyun instance->vf_affiliation_111_h);
8004*4882a593Smuzhiyun
8005*4882a593Smuzhiyun if (instance->hb_host_mem)
8006*4882a593Smuzhiyun dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
8007*4882a593Smuzhiyun instance->hb_host_mem,
8008*4882a593Smuzhiyun instance->hb_host_mem_h);
8009*4882a593Smuzhiyun
8010*4882a593Smuzhiyun megasas_free_ctrl_dma_buffers(instance);
8011*4882a593Smuzhiyun
8012*4882a593Smuzhiyun megasas_free_ctrl_mem(instance);
8013*4882a593Smuzhiyun
8014*4882a593Smuzhiyun megasas_destroy_debugfs(instance);
8015*4882a593Smuzhiyun
8016*4882a593Smuzhiyun scsi_host_put(host);
8017*4882a593Smuzhiyun
8018*4882a593Smuzhiyun pci_disable_device(pdev);
8019*4882a593Smuzhiyun }
8020*4882a593Smuzhiyun
8021*4882a593Smuzhiyun /**
8022*4882a593Smuzhiyun * megasas_shutdown - Shutdown entry point
8023*4882a593Smuzhiyun * @pdev: Generic device structure
8024*4882a593Smuzhiyun */
8025*4882a593Smuzhiyun static void megasas_shutdown(struct pci_dev *pdev)
8026*4882a593Smuzhiyun {
8027*4882a593Smuzhiyun struct megasas_instance *instance = pci_get_drvdata(pdev);
8028*4882a593Smuzhiyun
8029*4882a593Smuzhiyun if (!instance)
8030*4882a593Smuzhiyun return;
8031*4882a593Smuzhiyun
8032*4882a593Smuzhiyun instance->unload = 1;
8033*4882a593Smuzhiyun
8034*4882a593Smuzhiyun if (megasas_wait_for_adapter_operational(instance))
8035*4882a593Smuzhiyun goto skip_firing_dcmds;
8036*4882a593Smuzhiyun
8037*4882a593Smuzhiyun megasas_flush_cache(instance);
8038*4882a593Smuzhiyun megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
8039*4882a593Smuzhiyun
8040*4882a593Smuzhiyun skip_firing_dcmds:
8041*4882a593Smuzhiyun instance->instancet->disable_intr(instance);
8042*4882a593Smuzhiyun megasas_destroy_irqs(instance);
8043*4882a593Smuzhiyun
8044*4882a593Smuzhiyun if (instance->msix_vectors)
8045*4882a593Smuzhiyun pci_free_irq_vectors(instance->pdev);
8046*4882a593Smuzhiyun }
8047*4882a593Smuzhiyun
8048*4882a593Smuzhiyun /*
8049*4882a593Smuzhiyun * megasas_mgmt_open - char node "open" entry point
8050*4882a593Smuzhiyun * @inode: char node inode
8051*4882a593Smuzhiyun * @filep: char node file
8052*4882a593Smuzhiyun */
8053*4882a593Smuzhiyun static int megasas_mgmt_open(struct inode *inode, struct file *filep)
8054*4882a593Smuzhiyun {
8055*4882a593Smuzhiyun /*
8056*4882a593Smuzhiyun * Allow only those users with admin rights
8057*4882a593Smuzhiyun */
8058*4882a593Smuzhiyun if (!capable(CAP_SYS_ADMIN))
8059*4882a593Smuzhiyun return -EACCES;
8060*4882a593Smuzhiyun
8061*4882a593Smuzhiyun return 0;
8062*4882a593Smuzhiyun }
8063*4882a593Smuzhiyun
8064*4882a593Smuzhiyun /*
8065*4882a593Smuzhiyun * megasas_mgmt_fasync - Async notifier registration from applications
8066*4882a593Smuzhiyun * @fd: char node file descriptor number
8067*4882a593Smuzhiyun * @filep: char node file
8068*4882a593Smuzhiyun * @mode: notifier on/off
8069*4882a593Smuzhiyun *
8070*4882a593Smuzhiyun * This function adds the calling process to a driver global queue. When an
8071*4882a593Smuzhiyun * event occurs, SIGIO will be sent to all processes in this queue.
8072*4882a593Smuzhiyun */
8073*4882a593Smuzhiyun static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
8074*4882a593Smuzhiyun {
8075*4882a593Smuzhiyun int rc;
8076*4882a593Smuzhiyun
8077*4882a593Smuzhiyun mutex_lock(&megasas_async_queue_mutex);
8078*4882a593Smuzhiyun
8079*4882a593Smuzhiyun rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
8080*4882a593Smuzhiyun
8081*4882a593Smuzhiyun mutex_unlock(&megasas_async_queue_mutex);
8082*4882a593Smuzhiyun
8083*4882a593Smuzhiyun if (rc >= 0) {
8084*4882a593Smuzhiyun /* For sanity check when we get ioctl */
8085*4882a593Smuzhiyun filep->private_data = filep;
8086*4882a593Smuzhiyun return 0;
8087*4882a593Smuzhiyun }
8088*4882a593Smuzhiyun
8089*4882a593Smuzhiyun printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
8090*4882a593Smuzhiyun
8091*4882a593Smuzhiyun return rc;
8092*4882a593Smuzhiyun }
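/*
 * Illustrative sketch (not part of the driver): a userspace monitor that
 * wants SIGIO delivery through the fasync hook above would typically open
 * the management node and enable async notification with fcntl(). The
 * device node name is an assumption based on the chrdev name registered
 * in megasas_init(); headers and error handling are omitted.
 *
 *	int fd = open("/dev/megaraid_sas_ioctl", O_RDONLY);
 *	signal(SIGIO, aen_signal_handler);		(hypothetical handler)
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *
 * When an event occurs, the driver signals megasas_async_queue and every
 * registered process receives SIGIO, as noted in the comment above.
 */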
8093*4882a593Smuzhiyun
8094*4882a593Smuzhiyun /*
8095*4882a593Smuzhiyun * megasas_mgmt_poll - char node "poll" entry point
8096*4882a593Smuzhiyun * @filep: char node file
8097*4882a593Smuzhiyun * @wait: Events to poll for
8098*4882a593Smuzhiyun */
8099*4882a593Smuzhiyun static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
8100*4882a593Smuzhiyun {
8101*4882a593Smuzhiyun __poll_t mask;
8102*4882a593Smuzhiyun unsigned long flags;
8103*4882a593Smuzhiyun
8104*4882a593Smuzhiyun poll_wait(file, &megasas_poll_wait, wait);
8105*4882a593Smuzhiyun spin_lock_irqsave(&poll_aen_lock, flags);
8106*4882a593Smuzhiyun if (megasas_poll_wait_aen)
8107*4882a593Smuzhiyun mask = (EPOLLIN | EPOLLRDNORM);
8108*4882a593Smuzhiyun else
8109*4882a593Smuzhiyun mask = 0;
8110*4882a593Smuzhiyun megasas_poll_wait_aen = 0;
8111*4882a593Smuzhiyun spin_unlock_irqrestore(&poll_aen_lock, flags);
8112*4882a593Smuzhiyun return mask;
8113*4882a593Smuzhiyun }
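/*
 * Illustrative sketch (assumption, not driver code): applications that
 * prefer polling over SIGIO can simply poll() the management node and
 * fetch pending AENs once POLLIN is reported:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	int ready = poll(&pfd, 1, timeout_ms);
 *	if (ready > 0 && (pfd.revents & POLLIN))
 *		handle_pending_aen(fd);		(hypothetical helper)
 */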
8114*4882a593Smuzhiyun
8115*4882a593Smuzhiyun /*
8116*4882a593Smuzhiyun * megasas_set_crash_dump_params_ioctl:
8117*4882a593Smuzhiyun * Send CRASH_DUMP_MODE DCMD to all controllers
8118*4882a593Smuzhiyun * @cmd: MFI command frame
8119*4882a593Smuzhiyun */
8120*4882a593Smuzhiyun
8121*4882a593Smuzhiyun static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
8122*4882a593Smuzhiyun {
8123*4882a593Smuzhiyun struct megasas_instance *local_instance;
8124*4882a593Smuzhiyun int i, error = 0;
8125*4882a593Smuzhiyun int crash_support;
8126*4882a593Smuzhiyun
8127*4882a593Smuzhiyun crash_support = cmd->frame->dcmd.mbox.w[0];
8128*4882a593Smuzhiyun
8129*4882a593Smuzhiyun for (i = 0; i < megasas_mgmt_info.max_index; i++) {
8130*4882a593Smuzhiyun local_instance = megasas_mgmt_info.instance[i];
8131*4882a593Smuzhiyun if (local_instance && local_instance->crash_dump_drv_support) {
8132*4882a593Smuzhiyun if ((atomic_read(&local_instance->adprecovery) ==
8133*4882a593Smuzhiyun MEGASAS_HBA_OPERATIONAL) &&
8134*4882a593Smuzhiyun !megasas_set_crash_dump_params(local_instance,
8135*4882a593Smuzhiyun crash_support)) {
8136*4882a593Smuzhiyun local_instance->crash_dump_app_support =
8137*4882a593Smuzhiyun crash_support;
8138*4882a593Smuzhiyun dev_info(&local_instance->pdev->dev,
8139*4882a593Smuzhiyun "Application firmware crash "
8140*4882a593Smuzhiyun "dump mode set success\n");
8141*4882a593Smuzhiyun error = 0;
8142*4882a593Smuzhiyun } else {
8143*4882a593Smuzhiyun dev_info(&local_instance->pdev->dev,
8144*4882a593Smuzhiyun "Application firmware crash "
8145*4882a593Smuzhiyun "dump mode set failed\n");
8146*4882a593Smuzhiyun error = -1;
8147*4882a593Smuzhiyun }
8148*4882a593Smuzhiyun }
8149*4882a593Smuzhiyun }
8150*4882a593Smuzhiyun return error;
8151*4882a593Smuzhiyun }
8152*4882a593Smuzhiyun
8153*4882a593Smuzhiyun /**
8154*4882a593Smuzhiyun * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
8155*4882a593Smuzhiyun * @instance: Adapter soft state
8156*4882a593Smuzhiyun * @user_ioc: User's ioctl packet
8157*4882a593Smuzhiyun * @ioc: ioctl packet
8158*4882a593Smuzhiyun */
8159*4882a593Smuzhiyun static int
8160*4882a593Smuzhiyun megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
8161*4882a593Smuzhiyun struct megasas_iocpacket __user * user_ioc,
8162*4882a593Smuzhiyun struct megasas_iocpacket *ioc)
8163*4882a593Smuzhiyun {
8164*4882a593Smuzhiyun struct megasas_sge64 *kern_sge64 = NULL;
8165*4882a593Smuzhiyun struct megasas_sge32 *kern_sge32 = NULL;
8166*4882a593Smuzhiyun struct megasas_cmd *cmd;
8167*4882a593Smuzhiyun void *kbuff_arr[MAX_IOCTL_SGE];
8168*4882a593Smuzhiyun dma_addr_t buf_handle = 0;
8169*4882a593Smuzhiyun int error = 0, i;
8170*4882a593Smuzhiyun void *sense = NULL;
8171*4882a593Smuzhiyun dma_addr_t sense_handle;
8172*4882a593Smuzhiyun void *sense_ptr;
8173*4882a593Smuzhiyun u32 opcode = 0;
8174*4882a593Smuzhiyun int ret = DCMD_SUCCESS;
8175*4882a593Smuzhiyun
8176*4882a593Smuzhiyun memset(kbuff_arr, 0, sizeof(kbuff_arr));
8177*4882a593Smuzhiyun
8178*4882a593Smuzhiyun if (ioc->sge_count > MAX_IOCTL_SGE) {
8179*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
8180*4882a593Smuzhiyun ioc->sge_count, MAX_IOCTL_SGE);
8181*4882a593Smuzhiyun return -EINVAL;
8182*4882a593Smuzhiyun }
8183*4882a593Smuzhiyun
8184*4882a593Smuzhiyun if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
8185*4882a593Smuzhiyun ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
8186*4882a593Smuzhiyun !instance->support_nvme_passthru) ||
8187*4882a593Smuzhiyun ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
8188*4882a593Smuzhiyun !instance->support_pci_lane_margining)) {
8189*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
8190*4882a593Smuzhiyun "Received invalid ioctl command 0x%x\n",
8191*4882a593Smuzhiyun ioc->frame.hdr.cmd);
8192*4882a593Smuzhiyun return -ENOTSUPP;
8193*4882a593Smuzhiyun }
8194*4882a593Smuzhiyun
8195*4882a593Smuzhiyun cmd = megasas_get_cmd(instance);
8196*4882a593Smuzhiyun if (!cmd) {
8197*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
8198*4882a593Smuzhiyun return -ENOMEM;
8199*4882a593Smuzhiyun }
8200*4882a593Smuzhiyun
8201*4882a593Smuzhiyun /*
8202*4882a593Smuzhiyun 	 * The user's IOCTL packet has at most two frames. Copy those
8203*4882a593Smuzhiyun 	 * frames into our cmd's frames. cmd->frame's context gets
8204*4882a593Smuzhiyun 	 * overwritten when we copy from the user's frames, so set that
8205*4882a593Smuzhiyun 	 * value separately afterwards.
8206*4882a593Smuzhiyun */
8207*4882a593Smuzhiyun memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
8208*4882a593Smuzhiyun cmd->frame->hdr.context = cpu_to_le32(cmd->index);
8209*4882a593Smuzhiyun cmd->frame->hdr.pad_0 = 0;
8210*4882a593Smuzhiyun
8211*4882a593Smuzhiyun cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
8212*4882a593Smuzhiyun
8213*4882a593Smuzhiyun if (instance->consistent_mask_64bit)
8214*4882a593Smuzhiyun cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
8215*4882a593Smuzhiyun MFI_FRAME_SENSE64));
8216*4882a593Smuzhiyun else
8217*4882a593Smuzhiyun cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
8218*4882a593Smuzhiyun MFI_FRAME_SENSE64));
8219*4882a593Smuzhiyun
8220*4882a593Smuzhiyun if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
8221*4882a593Smuzhiyun opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
8222*4882a593Smuzhiyun
8223*4882a593Smuzhiyun if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
8224*4882a593Smuzhiyun mutex_lock(&instance->reset_mutex);
8225*4882a593Smuzhiyun if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
8226*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
8227*4882a593Smuzhiyun mutex_unlock(&instance->reset_mutex);
8228*4882a593Smuzhiyun return -1;
8229*4882a593Smuzhiyun }
8230*4882a593Smuzhiyun mutex_unlock(&instance->reset_mutex);
8231*4882a593Smuzhiyun }
8232*4882a593Smuzhiyun
8233*4882a593Smuzhiyun if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
8234*4882a593Smuzhiyun error = megasas_set_crash_dump_params_ioctl(cmd);
8235*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
8236*4882a593Smuzhiyun return error;
8237*4882a593Smuzhiyun }
8238*4882a593Smuzhiyun
8239*4882a593Smuzhiyun /*
8240*4882a593Smuzhiyun * The management interface between applications and the fw uses
8241*4882a593Smuzhiyun 	 * MFI frames. E.g., RAID configuration changes, LD property changes,
8242*4882a593Smuzhiyun 	 * etc. are accomplished through different kinds of MFI frames. The
8243*4882a593Smuzhiyun * driver needs to care only about substituting user buffers with
8244*4882a593Smuzhiyun * kernel buffers in SGLs. The location of SGL is embedded in the
8245*4882a593Smuzhiyun * struct iocpacket itself.
8246*4882a593Smuzhiyun */
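	/*
	 * Illustrative sketch of the substitution (numbers are assumptions):
	 * if the application supplied
	 *
	 *	ioc->sgl[0].iov_base = user_buf;	(user virtual address)
	 *	ioc->sgl[0].iov_len  = 4096;
	 *
	 * the loop below allocates a 4096-byte DMA-coherent mirror buffer,
	 * copies the user data into it, and writes the mirror's bus address
	 * and length into the SGL embedded in cmd->frame at ioc->sgl_off,
	 * so the firmware never sees a user pointer.
	 */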
8247*4882a593Smuzhiyun if (instance->consistent_mask_64bit)
8248*4882a593Smuzhiyun kern_sge64 = (struct megasas_sge64 *)
8249*4882a593Smuzhiyun ((unsigned long)cmd->frame + ioc->sgl_off);
8250*4882a593Smuzhiyun else
8251*4882a593Smuzhiyun kern_sge32 = (struct megasas_sge32 *)
8252*4882a593Smuzhiyun ((unsigned long)cmd->frame + ioc->sgl_off);
8253*4882a593Smuzhiyun
8254*4882a593Smuzhiyun /*
8255*4882a593Smuzhiyun * For each user buffer, create a mirror buffer and copy in
8256*4882a593Smuzhiyun */
8257*4882a593Smuzhiyun for (i = 0; i < ioc->sge_count; i++) {
8258*4882a593Smuzhiyun if (!ioc->sgl[i].iov_len)
8259*4882a593Smuzhiyun continue;
8260*4882a593Smuzhiyun
8261*4882a593Smuzhiyun kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
8262*4882a593Smuzhiyun ioc->sgl[i].iov_len,
8263*4882a593Smuzhiyun &buf_handle, GFP_KERNEL);
8264*4882a593Smuzhiyun if (!kbuff_arr[i]) {
8265*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
8266*4882a593Smuzhiyun "kernel SGL buffer for IOCTL\n");
8267*4882a593Smuzhiyun error = -ENOMEM;
8268*4882a593Smuzhiyun goto out;
8269*4882a593Smuzhiyun }
8270*4882a593Smuzhiyun
8271*4882a593Smuzhiyun /*
8272*4882a593Smuzhiyun * We don't change the dma_coherent_mask, so
8273*4882a593Smuzhiyun * dma_alloc_coherent only returns 32bit addresses
8274*4882a593Smuzhiyun */
8275*4882a593Smuzhiyun if (instance->consistent_mask_64bit) {
8276*4882a593Smuzhiyun kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
8277*4882a593Smuzhiyun kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8278*4882a593Smuzhiyun } else {
8279*4882a593Smuzhiyun kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
8280*4882a593Smuzhiyun kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8281*4882a593Smuzhiyun }
8282*4882a593Smuzhiyun
8283*4882a593Smuzhiyun /*
8284*4882a593Smuzhiyun * We created a kernel buffer corresponding to the
8285*4882a593Smuzhiyun * user buffer. Now copy in from the user buffer
8286*4882a593Smuzhiyun */
8287*4882a593Smuzhiyun if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
8288*4882a593Smuzhiyun (u32) (ioc->sgl[i].iov_len))) {
8289*4882a593Smuzhiyun error = -EFAULT;
8290*4882a593Smuzhiyun goto out;
8291*4882a593Smuzhiyun }
8292*4882a593Smuzhiyun }
8293*4882a593Smuzhiyun
8294*4882a593Smuzhiyun if (ioc->sense_len) {
8295*4882a593Smuzhiyun /* make sure the pointer is part of the frame */
8296*4882a593Smuzhiyun if (ioc->sense_off >
8297*4882a593Smuzhiyun (sizeof(union megasas_frame) - sizeof(__le64))) {
8298*4882a593Smuzhiyun error = -EINVAL;
8299*4882a593Smuzhiyun goto out;
8300*4882a593Smuzhiyun }
8301*4882a593Smuzhiyun
8302*4882a593Smuzhiyun sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
8303*4882a593Smuzhiyun &sense_handle, GFP_KERNEL);
8304*4882a593Smuzhiyun if (!sense) {
8305*4882a593Smuzhiyun error = -ENOMEM;
8306*4882a593Smuzhiyun goto out;
8307*4882a593Smuzhiyun }
8308*4882a593Smuzhiyun
8309*4882a593Smuzhiyun /* always store 64 bits regardless of addressing */
8310*4882a593Smuzhiyun sense_ptr = (void *)cmd->frame + ioc->sense_off;
8311*4882a593Smuzhiyun put_unaligned_le64(sense_handle, sense_ptr);
8312*4882a593Smuzhiyun }
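	/*
	 * Note: cmd->frame now carries the DMA handle of the kernel sense
	 * buffer at sense_off, while ioc->frame (the unmodified copy of the
	 * user's frame) still carries the user-space sense buffer pointer at
	 * the same offset; that pointer is read back at copy-out time below.
	 */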
8313*4882a593Smuzhiyun
8314*4882a593Smuzhiyun /*
8315*4882a593Smuzhiyun * Set the sync_cmd flag so that the ISR knows not to complete this
8316*4882a593Smuzhiyun * cmd to the SCSI mid-layer
8317*4882a593Smuzhiyun */
8318*4882a593Smuzhiyun cmd->sync_cmd = 1;
8319*4882a593Smuzhiyun
8320*4882a593Smuzhiyun ret = megasas_issue_blocked_cmd(instance, cmd, 0);
8321*4882a593Smuzhiyun switch (ret) {
8322*4882a593Smuzhiyun case DCMD_INIT:
8323*4882a593Smuzhiyun case DCMD_BUSY:
8324*4882a593Smuzhiyun cmd->sync_cmd = 0;
8325*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
8326*4882a593Smuzhiyun "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
8327*4882a593Smuzhiyun __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
8328*4882a593Smuzhiyun cmd->cmd_status_drv);
8329*4882a593Smuzhiyun error = -EBUSY;
8330*4882a593Smuzhiyun goto out;
8331*4882a593Smuzhiyun }
8332*4882a593Smuzhiyun
8333*4882a593Smuzhiyun cmd->sync_cmd = 0;
8334*4882a593Smuzhiyun
8335*4882a593Smuzhiyun if (instance->unload == 1) {
8336*4882a593Smuzhiyun 		dev_info(&instance->pdev->dev, "Driver unload is in progress; "
8337*4882a593Smuzhiyun 			"not copying data back to the application\n");
8338*4882a593Smuzhiyun goto out;
8339*4882a593Smuzhiyun }
8340*4882a593Smuzhiyun /*
8341*4882a593Smuzhiyun * copy out the kernel buffers to user buffers
8342*4882a593Smuzhiyun */
8343*4882a593Smuzhiyun for (i = 0; i < ioc->sge_count; i++) {
8344*4882a593Smuzhiyun if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
8345*4882a593Smuzhiyun ioc->sgl[i].iov_len)) {
8346*4882a593Smuzhiyun error = -EFAULT;
8347*4882a593Smuzhiyun goto out;
8348*4882a593Smuzhiyun }
8349*4882a593Smuzhiyun }
8350*4882a593Smuzhiyun
8351*4882a593Smuzhiyun /*
8352*4882a593Smuzhiyun * copy out the sense
8353*4882a593Smuzhiyun */
8354*4882a593Smuzhiyun if (ioc->sense_len) {
8355*4882a593Smuzhiyun /*
8356*4882a593Smuzhiyun * sense_ptr points to the location that has the user
8357*4882a593Smuzhiyun * sense buffer address
8358*4882a593Smuzhiyun */
8359*4882a593Smuzhiyun sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
8360*4882a593Smuzhiyun ioc->sense_off);
8361*4882a593Smuzhiyun
8362*4882a593Smuzhiyun if (copy_to_user((void __user *)((unsigned long)
8363*4882a593Smuzhiyun get_unaligned((unsigned long *)sense_ptr)),
8364*4882a593Smuzhiyun sense, ioc->sense_len)) {
8365*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "Failed to copy out to user "
8366*4882a593Smuzhiyun "sense data\n");
8367*4882a593Smuzhiyun error = -EFAULT;
8368*4882a593Smuzhiyun goto out;
8369*4882a593Smuzhiyun }
8370*4882a593Smuzhiyun }
8371*4882a593Smuzhiyun
8372*4882a593Smuzhiyun /*
8373*4882a593Smuzhiyun * copy the status codes returned by the fw
8374*4882a593Smuzhiyun */
8375*4882a593Smuzhiyun if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
8376*4882a593Smuzhiyun &cmd->frame->hdr.cmd_status, sizeof(u8))) {
8377*4882a593Smuzhiyun dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
8378*4882a593Smuzhiyun error = -EFAULT;
8379*4882a593Smuzhiyun }
8380*4882a593Smuzhiyun
8381*4882a593Smuzhiyun out:
8382*4882a593Smuzhiyun if (sense) {
8383*4882a593Smuzhiyun dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
8384*4882a593Smuzhiyun sense, sense_handle);
8385*4882a593Smuzhiyun }
8386*4882a593Smuzhiyun
8387*4882a593Smuzhiyun for (i = 0; i < ioc->sge_count; i++) {
8388*4882a593Smuzhiyun if (kbuff_arr[i]) {
8389*4882a593Smuzhiyun if (instance->consistent_mask_64bit)
8390*4882a593Smuzhiyun dma_free_coherent(&instance->pdev->dev,
8391*4882a593Smuzhiyun le32_to_cpu(kern_sge64[i].length),
8392*4882a593Smuzhiyun kbuff_arr[i],
8393*4882a593Smuzhiyun le64_to_cpu(kern_sge64[i].phys_addr));
8394*4882a593Smuzhiyun else
8395*4882a593Smuzhiyun dma_free_coherent(&instance->pdev->dev,
8396*4882a593Smuzhiyun le32_to_cpu(kern_sge32[i].length),
8397*4882a593Smuzhiyun kbuff_arr[i],
8398*4882a593Smuzhiyun le32_to_cpu(kern_sge32[i].phys_addr));
8399*4882a593Smuzhiyun kbuff_arr[i] = NULL;
8400*4882a593Smuzhiyun }
8401*4882a593Smuzhiyun }
8402*4882a593Smuzhiyun
8403*4882a593Smuzhiyun megasas_return_cmd(instance, cmd);
8404*4882a593Smuzhiyun return error;
8405*4882a593Smuzhiyun }
8406*4882a593Smuzhiyun
8407*4882a593Smuzhiyun static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
8408*4882a593Smuzhiyun {
8409*4882a593Smuzhiyun struct megasas_iocpacket __user *user_ioc =
8410*4882a593Smuzhiyun (struct megasas_iocpacket __user *)arg;
8411*4882a593Smuzhiyun struct megasas_iocpacket *ioc;
8412*4882a593Smuzhiyun struct megasas_instance *instance;
8413*4882a593Smuzhiyun int error;
8414*4882a593Smuzhiyun
8415*4882a593Smuzhiyun ioc = memdup_user(user_ioc, sizeof(*ioc));
8416*4882a593Smuzhiyun if (IS_ERR(ioc))
8417*4882a593Smuzhiyun return PTR_ERR(ioc);
8418*4882a593Smuzhiyun
8419*4882a593Smuzhiyun instance = megasas_lookup_instance(ioc->host_no);
8420*4882a593Smuzhiyun if (!instance) {
8421*4882a593Smuzhiyun error = -ENODEV;
8422*4882a593Smuzhiyun goto out_kfree_ioc;
8423*4882a593Smuzhiyun }
8424*4882a593Smuzhiyun
8425*4882a593Smuzhiyun /* Block ioctls in VF mode */
8426*4882a593Smuzhiyun if (instance->requestorId && !allow_vf_ioctls) {
8427*4882a593Smuzhiyun error = -ENODEV;
8428*4882a593Smuzhiyun goto out_kfree_ioc;
8429*4882a593Smuzhiyun }
8430*4882a593Smuzhiyun
8431*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8432*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "Controller in crit error\n");
8433*4882a593Smuzhiyun error = -ENODEV;
8434*4882a593Smuzhiyun goto out_kfree_ioc;
8435*4882a593Smuzhiyun }
8436*4882a593Smuzhiyun
8437*4882a593Smuzhiyun if (instance->unload == 1) {
8438*4882a593Smuzhiyun error = -ENODEV;
8439*4882a593Smuzhiyun goto out_kfree_ioc;
8440*4882a593Smuzhiyun }
8441*4882a593Smuzhiyun
8442*4882a593Smuzhiyun if (down_interruptible(&instance->ioctl_sem)) {
8443*4882a593Smuzhiyun error = -ERESTARTSYS;
8444*4882a593Smuzhiyun goto out_kfree_ioc;
8445*4882a593Smuzhiyun }
8446*4882a593Smuzhiyun
8447*4882a593Smuzhiyun if (megasas_wait_for_adapter_operational(instance)) {
8448*4882a593Smuzhiyun error = -ENODEV;
8449*4882a593Smuzhiyun goto out_up;
8450*4882a593Smuzhiyun }
8451*4882a593Smuzhiyun
8452*4882a593Smuzhiyun error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
8453*4882a593Smuzhiyun out_up:
8454*4882a593Smuzhiyun up(&instance->ioctl_sem);
8455*4882a593Smuzhiyun
8456*4882a593Smuzhiyun out_kfree_ioc:
8457*4882a593Smuzhiyun kfree(ioc);
8458*4882a593Smuzhiyun return error;
8459*4882a593Smuzhiyun }
8460*4882a593Smuzhiyun
8461*4882a593Smuzhiyun static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
8462*4882a593Smuzhiyun {
8463*4882a593Smuzhiyun struct megasas_instance *instance;
8464*4882a593Smuzhiyun struct megasas_aen aen;
8465*4882a593Smuzhiyun int error;
8466*4882a593Smuzhiyun
8467*4882a593Smuzhiyun if (file->private_data != file) {
8468*4882a593Smuzhiyun printk(KERN_DEBUG "megasas: fasync_helper was not "
8469*4882a593Smuzhiyun "called first\n");
8470*4882a593Smuzhiyun return -EINVAL;
8471*4882a593Smuzhiyun }
8472*4882a593Smuzhiyun
8473*4882a593Smuzhiyun if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
8474*4882a593Smuzhiyun return -EFAULT;
8475*4882a593Smuzhiyun
8476*4882a593Smuzhiyun instance = megasas_lookup_instance(aen.host_no);
8477*4882a593Smuzhiyun
8478*4882a593Smuzhiyun if (!instance)
8479*4882a593Smuzhiyun return -ENODEV;
8480*4882a593Smuzhiyun
8481*4882a593Smuzhiyun if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8482*4882a593Smuzhiyun return -ENODEV;
8483*4882a593Smuzhiyun }
8484*4882a593Smuzhiyun
8485*4882a593Smuzhiyun if (instance->unload == 1) {
8486*4882a593Smuzhiyun return -ENODEV;
8487*4882a593Smuzhiyun }
8488*4882a593Smuzhiyun
8489*4882a593Smuzhiyun if (megasas_wait_for_adapter_operational(instance))
8490*4882a593Smuzhiyun return -ENODEV;
8491*4882a593Smuzhiyun
8492*4882a593Smuzhiyun mutex_lock(&instance->reset_mutex);
8493*4882a593Smuzhiyun error = megasas_register_aen(instance, aen.seq_num,
8494*4882a593Smuzhiyun aen.class_locale_word);
8495*4882a593Smuzhiyun mutex_unlock(&instance->reset_mutex);
8496*4882a593Smuzhiyun return error;
8497*4882a593Smuzhiyun }
8498*4882a593Smuzhiyun
8499*4882a593Smuzhiyun /**
8500*4882a593Smuzhiyun * megasas_mgmt_ioctl - char node ioctl entry point
8501*4882a593Smuzhiyun * @file: char device file pointer
8502*4882a593Smuzhiyun * @cmd: ioctl command
8503*4882a593Smuzhiyun * @arg: ioctl command arguments address
8504*4882a593Smuzhiyun */
8505*4882a593Smuzhiyun static long
8506*4882a593Smuzhiyun megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8507*4882a593Smuzhiyun {
8508*4882a593Smuzhiyun switch (cmd) {
8509*4882a593Smuzhiyun case MEGASAS_IOC_FIRMWARE:
8510*4882a593Smuzhiyun return megasas_mgmt_ioctl_fw(file, arg);
8511*4882a593Smuzhiyun
8512*4882a593Smuzhiyun case MEGASAS_IOC_GET_AEN:
8513*4882a593Smuzhiyun return megasas_mgmt_ioctl_aen(file, arg);
8514*4882a593Smuzhiyun }
8515*4882a593Smuzhiyun
8516*4882a593Smuzhiyun return -ENOTTY;
8517*4882a593Smuzhiyun }
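/*
 * Illustrative sketch (assumption, not shipped code): a management tool
 * drives the two ioctls above roughly as follows. The values shown are
 * placeholders; the real layout of struct megasas_iocpacket and struct
 * megasas_aen comes from the driver's UAPI header.
 *
 *	struct megasas_iocpacket ioc = { 0 };
 *	ioc.host_no = host;			(SCSI host number)
 *	ioc.sge_count = 1;
 *	ioc.sgl[0].iov_base = data_buf;
 *	ioc.sgl[0].iov_len  = data_len;
 *	(fill ioc.frame.raw with the MFI/DCMD frame, sgl_off, sense_off/len)
 *	ioctl(fd, MEGASAS_IOC_FIRMWARE, &ioc);
 *
 *	struct megasas_aen aen = {
 *		.host_no = host,
 *		.seq_num = last_seq,
 *		.class_locale_word = locale,
 *	};
 *	ioctl(fd, MEGASAS_IOC_GET_AEN, &aen);
 */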
8518*4882a593Smuzhiyun
8519*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
8520*4882a593Smuzhiyun static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
8521*4882a593Smuzhiyun {
8522*4882a593Smuzhiyun struct compat_megasas_iocpacket __user *cioc =
8523*4882a593Smuzhiyun (struct compat_megasas_iocpacket __user *)arg;
8524*4882a593Smuzhiyun struct megasas_iocpacket __user *ioc =
8525*4882a593Smuzhiyun compat_alloc_user_space(sizeof(struct megasas_iocpacket));
8526*4882a593Smuzhiyun int i;
8527*4882a593Smuzhiyun int error = 0;
8528*4882a593Smuzhiyun compat_uptr_t ptr;
8529*4882a593Smuzhiyun u32 local_sense_off;
8530*4882a593Smuzhiyun u32 local_sense_len;
8531*4882a593Smuzhiyun u32 user_sense_off;
8532*4882a593Smuzhiyun
8533*4882a593Smuzhiyun if (clear_user(ioc, sizeof(*ioc)))
8534*4882a593Smuzhiyun return -EFAULT;
8535*4882a593Smuzhiyun
8536*4882a593Smuzhiyun if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
8537*4882a593Smuzhiyun copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
8538*4882a593Smuzhiyun copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
8539*4882a593Smuzhiyun copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
8540*4882a593Smuzhiyun copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
8541*4882a593Smuzhiyun copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
8542*4882a593Smuzhiyun return -EFAULT;
8543*4882a593Smuzhiyun
8544*4882a593Smuzhiyun /*
8545*4882a593Smuzhiyun * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
8546*4882a593Smuzhiyun 	 * sense_len is non-zero, so prepare the 64-bit value under
8547*4882a593Smuzhiyun * the same condition.
8548*4882a593Smuzhiyun */
8549*4882a593Smuzhiyun if (get_user(local_sense_off, &ioc->sense_off) ||
8550*4882a593Smuzhiyun get_user(local_sense_len, &ioc->sense_len) ||
8551*4882a593Smuzhiyun get_user(user_sense_off, &cioc->sense_off))
8552*4882a593Smuzhiyun return -EFAULT;
8553*4882a593Smuzhiyun
8554*4882a593Smuzhiyun if (local_sense_off != user_sense_off)
8555*4882a593Smuzhiyun return -EINVAL;
8556*4882a593Smuzhiyun
8557*4882a593Smuzhiyun if (local_sense_len) {
8558*4882a593Smuzhiyun void __user **sense_ioc_ptr =
8559*4882a593Smuzhiyun (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
8560*4882a593Smuzhiyun compat_uptr_t *sense_cioc_ptr =
8561*4882a593Smuzhiyun (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
8562*4882a593Smuzhiyun if (get_user(ptr, sense_cioc_ptr) ||
8563*4882a593Smuzhiyun put_user(compat_ptr(ptr), sense_ioc_ptr))
8564*4882a593Smuzhiyun return -EFAULT;
8565*4882a593Smuzhiyun }
8566*4882a593Smuzhiyun
8567*4882a593Smuzhiyun for (i = 0; i < MAX_IOCTL_SGE; i++) {
8568*4882a593Smuzhiyun if (get_user(ptr, &cioc->sgl[i].iov_base) ||
8569*4882a593Smuzhiyun put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
8570*4882a593Smuzhiyun copy_in_user(&ioc->sgl[i].iov_len,
8571*4882a593Smuzhiyun &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
8572*4882a593Smuzhiyun return -EFAULT;
8573*4882a593Smuzhiyun }
8574*4882a593Smuzhiyun
8575*4882a593Smuzhiyun error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
8576*4882a593Smuzhiyun
8577*4882a593Smuzhiyun if (copy_in_user(&cioc->frame.hdr.cmd_status,
8578*4882a593Smuzhiyun &ioc->frame.hdr.cmd_status, sizeof(u8))) {
8579*4882a593Smuzhiyun printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
8580*4882a593Smuzhiyun return -EFAULT;
8581*4882a593Smuzhiyun }
8582*4882a593Smuzhiyun return error;
8583*4882a593Smuzhiyun }
8584*4882a593Smuzhiyun
8585*4882a593Smuzhiyun static long
8586*4882a593Smuzhiyun megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
8587*4882a593Smuzhiyun unsigned long arg)
8588*4882a593Smuzhiyun {
8589*4882a593Smuzhiyun switch (cmd) {
8590*4882a593Smuzhiyun case MEGASAS_IOC_FIRMWARE32:
8591*4882a593Smuzhiyun return megasas_mgmt_compat_ioctl_fw(file, arg);
8592*4882a593Smuzhiyun case MEGASAS_IOC_GET_AEN:
8593*4882a593Smuzhiyun return megasas_mgmt_ioctl_aen(file, arg);
8594*4882a593Smuzhiyun }
8595*4882a593Smuzhiyun
8596*4882a593Smuzhiyun return -ENOTTY;
8597*4882a593Smuzhiyun }
8598*4882a593Smuzhiyun #endif
8599*4882a593Smuzhiyun
8600*4882a593Smuzhiyun /*
8601*4882a593Smuzhiyun * File operations structure for management interface
8602*4882a593Smuzhiyun */
8603*4882a593Smuzhiyun static const struct file_operations megasas_mgmt_fops = {
8604*4882a593Smuzhiyun .owner = THIS_MODULE,
8605*4882a593Smuzhiyun .open = megasas_mgmt_open,
8606*4882a593Smuzhiyun .fasync = megasas_mgmt_fasync,
8607*4882a593Smuzhiyun .unlocked_ioctl = megasas_mgmt_ioctl,
8608*4882a593Smuzhiyun .poll = megasas_mgmt_poll,
8609*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
8610*4882a593Smuzhiyun .compat_ioctl = megasas_mgmt_compat_ioctl,
8611*4882a593Smuzhiyun #endif
8612*4882a593Smuzhiyun .llseek = noop_llseek,
8613*4882a593Smuzhiyun };
8614*4882a593Smuzhiyun
8615*4882a593Smuzhiyun /*
8616*4882a593Smuzhiyun * PCI hotplug support registration structure
8617*4882a593Smuzhiyun */
8618*4882a593Smuzhiyun static struct pci_driver megasas_pci_driver = {
8619*4882a593Smuzhiyun
8620*4882a593Smuzhiyun .name = "megaraid_sas",
8621*4882a593Smuzhiyun .id_table = megasas_pci_table,
8622*4882a593Smuzhiyun .probe = megasas_probe_one,
8623*4882a593Smuzhiyun .remove = megasas_detach_one,
8624*4882a593Smuzhiyun .suspend = megasas_suspend,
8625*4882a593Smuzhiyun .resume = megasas_resume,
8626*4882a593Smuzhiyun .shutdown = megasas_shutdown,
8627*4882a593Smuzhiyun };
8628*4882a593Smuzhiyun
8629*4882a593Smuzhiyun /*
8630*4882a593Smuzhiyun * Sysfs driver attributes
8631*4882a593Smuzhiyun */
8632*4882a593Smuzhiyun static ssize_t version_show(struct device_driver *dd, char *buf)
8633*4882a593Smuzhiyun {
8634*4882a593Smuzhiyun return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
8635*4882a593Smuzhiyun MEGASAS_VERSION);
8636*4882a593Smuzhiyun }
8637*4882a593Smuzhiyun static DRIVER_ATTR_RO(version);
8638*4882a593Smuzhiyun
8639*4882a593Smuzhiyun static ssize_t release_date_show(struct device_driver *dd, char *buf)
8640*4882a593Smuzhiyun {
8641*4882a593Smuzhiyun return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
8642*4882a593Smuzhiyun MEGASAS_RELDATE);
8643*4882a593Smuzhiyun }
8644*4882a593Smuzhiyun static DRIVER_ATTR_RO(release_date);
8645*4882a593Smuzhiyun
8646*4882a593Smuzhiyun static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
8647*4882a593Smuzhiyun {
8648*4882a593Smuzhiyun return sprintf(buf, "%u\n", support_poll_for_event);
8649*4882a593Smuzhiyun }
8650*4882a593Smuzhiyun static DRIVER_ATTR_RO(support_poll_for_event);
8651*4882a593Smuzhiyun
8652*4882a593Smuzhiyun static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
8653*4882a593Smuzhiyun {
8654*4882a593Smuzhiyun return sprintf(buf, "%u\n", support_device_change);
8655*4882a593Smuzhiyun }
8656*4882a593Smuzhiyun static DRIVER_ATTR_RO(support_device_change);
8657*4882a593Smuzhiyun
8658*4882a593Smuzhiyun static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
8659*4882a593Smuzhiyun {
8660*4882a593Smuzhiyun return sprintf(buf, "%u\n", megasas_dbg_lvl);
8661*4882a593Smuzhiyun }
8662*4882a593Smuzhiyun
8663*4882a593Smuzhiyun static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
8664*4882a593Smuzhiyun size_t count)
8665*4882a593Smuzhiyun {
8666*4882a593Smuzhiyun int retval = count;
8667*4882a593Smuzhiyun
8668*4882a593Smuzhiyun if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
8669*4882a593Smuzhiyun printk(KERN_ERR "megasas: could not set dbg_lvl\n");
8670*4882a593Smuzhiyun retval = -EINVAL;
8671*4882a593Smuzhiyun }
8672*4882a593Smuzhiyun return retval;
8673*4882a593Smuzhiyun }
8674*4882a593Smuzhiyun static DRIVER_ATTR_RW(dbg_lvl);
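/*
 * Illustrative usage (the exact sysfs path is an assumption): these driver
 * attributes appear under the PCI driver directory once the module loads,
 * e.g.
 *
 *	cat /sys/bus/pci/drivers/megaraid_sas/version
 *	echo 1 > /sys/bus/pci/drivers/megaraid_sas/dbg_lvl
 */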
8675*4882a593Smuzhiyun
8676*4882a593Smuzhiyun static ssize_t
8677*4882a593Smuzhiyun support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
8678*4882a593Smuzhiyun {
8679*4882a593Smuzhiyun return sprintf(buf, "%u\n", support_nvme_encapsulation);
8680*4882a593Smuzhiyun }
8681*4882a593Smuzhiyun
8682*4882a593Smuzhiyun static DRIVER_ATTR_RO(support_nvme_encapsulation);
8683*4882a593Smuzhiyun
8684*4882a593Smuzhiyun static ssize_t
8685*4882a593Smuzhiyun support_pci_lane_margining_show(struct device_driver *dd, char *buf)
8686*4882a593Smuzhiyun {
8687*4882a593Smuzhiyun return sprintf(buf, "%u\n", support_pci_lane_margining);
8688*4882a593Smuzhiyun }
8689*4882a593Smuzhiyun
8690*4882a593Smuzhiyun static DRIVER_ATTR_RO(support_pci_lane_margining);
8691*4882a593Smuzhiyun
8692*4882a593Smuzhiyun static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
8693*4882a593Smuzhiyun {
8694*4882a593Smuzhiyun sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
8695*4882a593Smuzhiyun scsi_remove_device(sdev);
8696*4882a593Smuzhiyun scsi_device_put(sdev);
8697*4882a593Smuzhiyun }
8698*4882a593Smuzhiyun
8699*4882a593Smuzhiyun /**
8700*4882a593Smuzhiyun * megasas_update_device_list - Update the PD and LD device list from FW
8701*4882a593Smuzhiyun * after an AEN event notification
8702*4882a593Smuzhiyun * @instance: Adapter soft state
8703*4882a593Smuzhiyun * @event_type: Indicates type of event (PD or LD event)
8704*4882a593Smuzhiyun *
8705*4882a593Smuzhiyun * @return: Success or failure
8706*4882a593Smuzhiyun *
8707*4882a593Smuzhiyun * Issue DCMDs to Firmware to update the internal device list in driver.
8708*4882a593Smuzhiyun * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
8709*4882a593Smuzhiyun * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
8710*4882a593Smuzhiyun */
8711*4882a593Smuzhiyun static
8712*4882a593Smuzhiyun int megasas_update_device_list(struct megasas_instance *instance,
8713*4882a593Smuzhiyun int event_type)
8714*4882a593Smuzhiyun {
8715*4882a593Smuzhiyun int dcmd_ret = DCMD_SUCCESS;
8716*4882a593Smuzhiyun
8717*4882a593Smuzhiyun if (instance->enable_fw_dev_list) {
8718*4882a593Smuzhiyun dcmd_ret = megasas_host_device_list_query(instance, false);
8719*4882a593Smuzhiyun if (dcmd_ret != DCMD_SUCCESS)
8720*4882a593Smuzhiyun goto out;
8721*4882a593Smuzhiyun } else {
8722*4882a593Smuzhiyun if (event_type & SCAN_PD_CHANNEL) {
8723*4882a593Smuzhiyun dcmd_ret = megasas_get_pd_list(instance);
8724*4882a593Smuzhiyun
8725*4882a593Smuzhiyun if (dcmd_ret != DCMD_SUCCESS)
8726*4882a593Smuzhiyun goto out;
8727*4882a593Smuzhiyun }
8728*4882a593Smuzhiyun
8729*4882a593Smuzhiyun if (event_type & SCAN_VD_CHANNEL) {
8730*4882a593Smuzhiyun if (!instance->requestorId ||
8731*4882a593Smuzhiyun (instance->requestorId &&
8732*4882a593Smuzhiyun megasas_get_ld_vf_affiliation(instance, 0))) {
8733*4882a593Smuzhiyun dcmd_ret = megasas_ld_list_query(instance,
8734*4882a593Smuzhiyun MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
8735*4882a593Smuzhiyun if (dcmd_ret != DCMD_SUCCESS)
8736*4882a593Smuzhiyun goto out;
8737*4882a593Smuzhiyun }
8738*4882a593Smuzhiyun }
8739*4882a593Smuzhiyun }
8740*4882a593Smuzhiyun
8741*4882a593Smuzhiyun out:
8742*4882a593Smuzhiyun return dcmd_ret;
8743*4882a593Smuzhiyun }
8744*4882a593Smuzhiyun
8745*4882a593Smuzhiyun /**
8746*4882a593Smuzhiyun * megasas_add_remove_devices - Add/remove devices to SCSI mid-layer
8747*4882a593Smuzhiyun * after an AEN event notification
8748*4882a593Smuzhiyun * @instance: Adapter soft state
8749*4882a593Smuzhiyun * @scan_type: Indicates type of devices (PD/LD) to add
8750*4882a593Smuzhiyun * @return void
8751*4882a593Smuzhiyun */
8752*4882a593Smuzhiyun static
8753*4882a593Smuzhiyun void megasas_add_remove_devices(struct megasas_instance *instance,
8754*4882a593Smuzhiyun int scan_type)
8755*4882a593Smuzhiyun {
8756*4882a593Smuzhiyun int i, j;
8757*4882a593Smuzhiyun u16 pd_index = 0;
8758*4882a593Smuzhiyun u16 ld_index = 0;
8759*4882a593Smuzhiyun u16 channel = 0, id = 0;
8760*4882a593Smuzhiyun struct Scsi_Host *host;
8761*4882a593Smuzhiyun struct scsi_device *sdev1;
8762*4882a593Smuzhiyun struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
8763*4882a593Smuzhiyun struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;
8764*4882a593Smuzhiyun
8765*4882a593Smuzhiyun host = instance->host;
8766*4882a593Smuzhiyun
8767*4882a593Smuzhiyun if (instance->enable_fw_dev_list) {
8768*4882a593Smuzhiyun targetid_list = instance->host_device_list_buf;
8769*4882a593Smuzhiyun for (i = 0; i < targetid_list->count; i++) {
8770*4882a593Smuzhiyun targetid_entry = &targetid_list->host_device_list[i];
8771*4882a593Smuzhiyun if (targetid_entry->flags.u.bits.is_sys_pd) {
8772*4882a593Smuzhiyun channel = le16_to_cpu(targetid_entry->target_id) /
8773*4882a593Smuzhiyun MEGASAS_MAX_DEV_PER_CHANNEL;
8774*4882a593Smuzhiyun id = le16_to_cpu(targetid_entry->target_id) %
8775*4882a593Smuzhiyun MEGASAS_MAX_DEV_PER_CHANNEL;
8776*4882a593Smuzhiyun } else {
8777*4882a593Smuzhiyun channel = MEGASAS_MAX_PD_CHANNELS +
8778*4882a593Smuzhiyun (le16_to_cpu(targetid_entry->target_id) /
8779*4882a593Smuzhiyun MEGASAS_MAX_DEV_PER_CHANNEL);
8780*4882a593Smuzhiyun id = le16_to_cpu(targetid_entry->target_id) %
8781*4882a593Smuzhiyun MEGASAS_MAX_DEV_PER_CHANNEL;
8782*4882a593Smuzhiyun }
8783*4882a593Smuzhiyun sdev1 = scsi_device_lookup(host, channel, id, 0);
8784*4882a593Smuzhiyun if (!sdev1) {
8785*4882a593Smuzhiyun scsi_add_device(host, channel, id, 0);
8786*4882a593Smuzhiyun } else {
8787*4882a593Smuzhiyun scsi_device_put(sdev1);
8788*4882a593Smuzhiyun }
8789*4882a593Smuzhiyun }
8790*4882a593Smuzhiyun }
8791*4882a593Smuzhiyun
8792*4882a593Smuzhiyun if (scan_type & SCAN_PD_CHANNEL) {
8793*4882a593Smuzhiyun for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
8794*4882a593Smuzhiyun for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8795*4882a593Smuzhiyun pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
8796*4882a593Smuzhiyun sdev1 = scsi_device_lookup(host, i, j, 0);
8797*4882a593Smuzhiyun if (instance->pd_list[pd_index].driveState ==
8798*4882a593Smuzhiyun MR_PD_STATE_SYSTEM) {
8799*4882a593Smuzhiyun if (!sdev1)
8800*4882a593Smuzhiyun scsi_add_device(host, i, j, 0);
8801*4882a593Smuzhiyun else
8802*4882a593Smuzhiyun scsi_device_put(sdev1);
8803*4882a593Smuzhiyun } else {
8804*4882a593Smuzhiyun if (sdev1)
8805*4882a593Smuzhiyun megasas_remove_scsi_device(sdev1);
8806*4882a593Smuzhiyun }
8807*4882a593Smuzhiyun }
8808*4882a593Smuzhiyun }
8809*4882a593Smuzhiyun }
8810*4882a593Smuzhiyun
8811*4882a593Smuzhiyun if (scan_type & SCAN_VD_CHANNEL) {
8812*4882a593Smuzhiyun for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
8813*4882a593Smuzhiyun for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8814*4882a593Smuzhiyun ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
8815*4882a593Smuzhiyun sdev1 = scsi_device_lookup(host,
8816*4882a593Smuzhiyun MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8817*4882a593Smuzhiyun if (instance->ld_ids[ld_index] != 0xff) {
8818*4882a593Smuzhiyun if (!sdev1)
8819*4882a593Smuzhiyun scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8820*4882a593Smuzhiyun else
8821*4882a593Smuzhiyun scsi_device_put(sdev1);
8822*4882a593Smuzhiyun } else {
8823*4882a593Smuzhiyun if (sdev1)
8824*4882a593Smuzhiyun megasas_remove_scsi_device(sdev1);
8825*4882a593Smuzhiyun }
8826*4882a593Smuzhiyun }
8827*4882a593Smuzhiyun }
8828*4882a593Smuzhiyun }
8829*4882a593Smuzhiyun
8830*4882a593Smuzhiyun }
8831*4882a593Smuzhiyun
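/*
 * megasas_aen_polling - deferred AEN handler
 * @work: delayed work item embedded in struct megasas_aen_event
 *
 * Runs from the hotplug workqueue after an asynchronous event notification:
 * decodes the event, refreshes the PD/LD device lists, adds or removes SCSI
 * devices as needed, and finally re-registers an AEN with the firmware for
 * the next sequence number.
 */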
8832*4882a593Smuzhiyun static void
8833*4882a593Smuzhiyun megasas_aen_polling(struct work_struct *work)
8834*4882a593Smuzhiyun {
8835*4882a593Smuzhiyun struct megasas_aen_event *ev =
8836*4882a593Smuzhiyun container_of(work, struct megasas_aen_event, hotplug_work.work);
8837*4882a593Smuzhiyun struct megasas_instance *instance = ev->instance;
8838*4882a593Smuzhiyun union megasas_evt_class_locale class_locale;
8839*4882a593Smuzhiyun int event_type = 0;
8840*4882a593Smuzhiyun u32 seq_num;
8841*4882a593Smuzhiyun u16 ld_target_id;
8842*4882a593Smuzhiyun int error;
8843*4882a593Smuzhiyun u8 dcmd_ret = DCMD_SUCCESS;
8844*4882a593Smuzhiyun struct scsi_device *sdev1;
8845*4882a593Smuzhiyun
8846*4882a593Smuzhiyun if (!instance) {
8847*4882a593Smuzhiyun printk(KERN_ERR "invalid instance!\n");
8848*4882a593Smuzhiyun kfree(ev);
8849*4882a593Smuzhiyun return;
8850*4882a593Smuzhiyun }
8851*4882a593Smuzhiyun
8852*4882a593Smuzhiyun /* Don't run the event workqueue thread if OCR is running */
8853*4882a593Smuzhiyun mutex_lock(&instance->reset_mutex);
8854*4882a593Smuzhiyun
8855*4882a593Smuzhiyun instance->ev = NULL;
8856*4882a593Smuzhiyun if (instance->evt_detail) {
8857*4882a593Smuzhiyun megasas_decode_evt(instance);
8858*4882a593Smuzhiyun
8859*4882a593Smuzhiyun switch (le32_to_cpu(instance->evt_detail->code)) {
8860*4882a593Smuzhiyun
8861*4882a593Smuzhiyun case MR_EVT_PD_INSERTED:
8862*4882a593Smuzhiyun case MR_EVT_PD_REMOVED:
8863*4882a593Smuzhiyun event_type = SCAN_PD_CHANNEL;
8864*4882a593Smuzhiyun break;
8865*4882a593Smuzhiyun
8866*4882a593Smuzhiyun case MR_EVT_LD_OFFLINE:
8867*4882a593Smuzhiyun case MR_EVT_LD_DELETED:
8868*4882a593Smuzhiyun ld_target_id = instance->evt_detail->args.ld.target_id;
8869*4882a593Smuzhiyun sdev1 = scsi_device_lookup(instance->host,
8870*4882a593Smuzhiyun MEGASAS_MAX_PD_CHANNELS +
8871*4882a593Smuzhiyun (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL),
8872*4882a593Smuzhiyun (ld_target_id - MEGASAS_MAX_DEV_PER_CHANNEL),
8873*4882a593Smuzhiyun 0);
8874*4882a593Smuzhiyun if (sdev1)
8875*4882a593Smuzhiyun megasas_remove_scsi_device(sdev1);
8876*4882a593Smuzhiyun
8877*4882a593Smuzhiyun event_type = SCAN_VD_CHANNEL;
8878*4882a593Smuzhiyun break;
8879*4882a593Smuzhiyun case MR_EVT_LD_CREATED:
8880*4882a593Smuzhiyun event_type = SCAN_VD_CHANNEL;
8881*4882a593Smuzhiyun break;
8882*4882a593Smuzhiyun
8883*4882a593Smuzhiyun case MR_EVT_CFG_CLEARED:
8884*4882a593Smuzhiyun case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
8885*4882a593Smuzhiyun case MR_EVT_FOREIGN_CFG_IMPORTED:
8886*4882a593Smuzhiyun case MR_EVT_LD_STATE_CHANGE:
8887*4882a593Smuzhiyun event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
8888*4882a593Smuzhiyun dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
8889*4882a593Smuzhiyun instance->host->host_no);
8890*4882a593Smuzhiyun break;
8891*4882a593Smuzhiyun
8892*4882a593Smuzhiyun case MR_EVT_CTRL_PROP_CHANGED:
8893*4882a593Smuzhiyun dcmd_ret = megasas_get_ctrl_info(instance);
8894*4882a593Smuzhiyun if (dcmd_ret == DCMD_SUCCESS &&
8895*4882a593Smuzhiyun instance->snapdump_wait_time) {
8896*4882a593Smuzhiyun megasas_get_snapdump_properties(instance);
8897*4882a593Smuzhiyun dev_info(&instance->pdev->dev,
8898*4882a593Smuzhiyun "Snap dump wait time\t: %d\n",
8899*4882a593Smuzhiyun instance->snapdump_wait_time);
8900*4882a593Smuzhiyun }
8901*4882a593Smuzhiyun break;
8902*4882a593Smuzhiyun default:
8903*4882a593Smuzhiyun event_type = 0;
8904*4882a593Smuzhiyun break;
8905*4882a593Smuzhiyun }
8906*4882a593Smuzhiyun } else {
8907*4882a593Smuzhiyun dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
8908*4882a593Smuzhiyun mutex_unlock(&instance->reset_mutex);
8909*4882a593Smuzhiyun kfree(ev);
8910*4882a593Smuzhiyun return;
8911*4882a593Smuzhiyun }
8912*4882a593Smuzhiyun
8913*4882a593Smuzhiyun if (event_type)
8914*4882a593Smuzhiyun dcmd_ret = megasas_update_device_list(instance, event_type);
8915*4882a593Smuzhiyun
8916*4882a593Smuzhiyun mutex_unlock(&instance->reset_mutex);
8917*4882a593Smuzhiyun
8918*4882a593Smuzhiyun if (event_type && dcmd_ret == DCMD_SUCCESS)
8919*4882a593Smuzhiyun megasas_add_remove_devices(instance, event_type);
8920*4882a593Smuzhiyun
8921*4882a593Smuzhiyun if (dcmd_ret == DCMD_SUCCESS)
8922*4882a593Smuzhiyun seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
8923*4882a593Smuzhiyun else
8924*4882a593Smuzhiyun seq_num = instance->last_seq_num;
8925*4882a593Smuzhiyun
8926*4882a593Smuzhiyun /* Register AEN with FW for latest sequence number plus 1 */
8927*4882a593Smuzhiyun class_locale.members.reserved = 0;
8928*4882a593Smuzhiyun class_locale.members.locale = MR_EVT_LOCALE_ALL;
8929*4882a593Smuzhiyun class_locale.members.class = MR_EVT_CLASS_DEBUG;
8930*4882a593Smuzhiyun
8931*4882a593Smuzhiyun if (instance->aen_cmd != NULL) {
8932*4882a593Smuzhiyun kfree(ev);
8933*4882a593Smuzhiyun return;
8934*4882a593Smuzhiyun }
8935*4882a593Smuzhiyun
8936*4882a593Smuzhiyun mutex_lock(&instance->reset_mutex);
8937*4882a593Smuzhiyun error = megasas_register_aen(instance, seq_num,
8938*4882a593Smuzhiyun class_locale.word);
8939*4882a593Smuzhiyun if (error)
8940*4882a593Smuzhiyun dev_err(&instance->pdev->dev,
8941*4882a593Smuzhiyun "register aen failed error %x\n", error);
8942*4882a593Smuzhiyun
8943*4882a593Smuzhiyun mutex_unlock(&instance->reset_mutex);
8944*4882a593Smuzhiyun kfree(ev);
8945*4882a593Smuzhiyun }
8946*4882a593Smuzhiyun
8947*4882a593Smuzhiyun /**
8948*4882a593Smuzhiyun * megasas_init - Driver load entry point
8949*4882a593Smuzhiyun */
8950*4882a593Smuzhiyun static int __init megasas_init(void)
8951*4882a593Smuzhiyun {
8952*4882a593Smuzhiyun int rval;
8953*4882a593Smuzhiyun
8954*4882a593Smuzhiyun /*
8955*4882a593Smuzhiyun 	 * When booted in a kdump kernel, minimize the memory footprint by
8956*4882a593Smuzhiyun 	 * disabling a few features
8957*4882a593Smuzhiyun */
8958*4882a593Smuzhiyun if (reset_devices) {
8959*4882a593Smuzhiyun msix_vectors = 1;
8960*4882a593Smuzhiyun rdpq_enable = 0;
8961*4882a593Smuzhiyun dual_qdepth_disable = 1;
8962*4882a593Smuzhiyun }
8963*4882a593Smuzhiyun
8964*4882a593Smuzhiyun /*
8965*4882a593Smuzhiyun * Announce driver version and other information
8966*4882a593Smuzhiyun */
8967*4882a593Smuzhiyun pr_info("megasas: %s\n", MEGASAS_VERSION);
8968*4882a593Smuzhiyun
8969*4882a593Smuzhiyun spin_lock_init(&poll_aen_lock);
8970*4882a593Smuzhiyun
8971*4882a593Smuzhiyun support_poll_for_event = 2;
8972*4882a593Smuzhiyun support_device_change = 1;
8973*4882a593Smuzhiyun support_nvme_encapsulation = true;
8974*4882a593Smuzhiyun support_pci_lane_margining = true;
8975*4882a593Smuzhiyun
8976*4882a593Smuzhiyun memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
8977*4882a593Smuzhiyun
8978*4882a593Smuzhiyun /*
8979*4882a593Smuzhiyun * Register character device node
8980*4882a593Smuzhiyun */
8981*4882a593Smuzhiyun rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
8982*4882a593Smuzhiyun
8983*4882a593Smuzhiyun if (rval < 0) {
8984*4882a593Smuzhiyun printk(KERN_DEBUG "megasas: failed to open device node\n");
8985*4882a593Smuzhiyun return rval;
8986*4882a593Smuzhiyun }
8987*4882a593Smuzhiyun
8988*4882a593Smuzhiyun megasas_mgmt_majorno = rval;
8989*4882a593Smuzhiyun
8990*4882a593Smuzhiyun megasas_init_debugfs();
8991*4882a593Smuzhiyun
8992*4882a593Smuzhiyun /*
8993*4882a593Smuzhiyun * Register ourselves as PCI hotplug module
8994*4882a593Smuzhiyun */
8995*4882a593Smuzhiyun rval = pci_register_driver(&megasas_pci_driver);
8996*4882a593Smuzhiyun
8997*4882a593Smuzhiyun if (rval) {
8998*4882a593Smuzhiyun printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n");
8999*4882a593Smuzhiyun goto err_pcidrv;
9000*4882a593Smuzhiyun }
9001*4882a593Smuzhiyun
9002*4882a593Smuzhiyun if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
9003*4882a593Smuzhiyun (event_log_level > MFI_EVT_CLASS_DEAD)) {
9004*4882a593Smuzhiyun pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
9005*4882a593Smuzhiyun event_log_level = MFI_EVT_CLASS_CRITICAL;
9006*4882a593Smuzhiyun }
9007*4882a593Smuzhiyun
9008*4882a593Smuzhiyun rval = driver_create_file(&megasas_pci_driver.driver,
9009*4882a593Smuzhiyun &driver_attr_version);
9010*4882a593Smuzhiyun if (rval)
9011*4882a593Smuzhiyun goto err_dcf_attr_ver;
9012*4882a593Smuzhiyun
9013*4882a593Smuzhiyun rval = driver_create_file(&megasas_pci_driver.driver,
9014*4882a593Smuzhiyun &driver_attr_release_date);
9015*4882a593Smuzhiyun if (rval)
9016*4882a593Smuzhiyun goto err_dcf_rel_date;
9017*4882a593Smuzhiyun
9018*4882a593Smuzhiyun rval = driver_create_file(&megasas_pci_driver.driver,
9019*4882a593Smuzhiyun &driver_attr_support_poll_for_event);
9020*4882a593Smuzhiyun if (rval)
9021*4882a593Smuzhiyun goto err_dcf_support_poll_for_event;
9022*4882a593Smuzhiyun
9023*4882a593Smuzhiyun rval = driver_create_file(&megasas_pci_driver.driver,
9024*4882a593Smuzhiyun &driver_attr_dbg_lvl);
9025*4882a593Smuzhiyun if (rval)
9026*4882a593Smuzhiyun goto err_dcf_dbg_lvl;
9027*4882a593Smuzhiyun rval = driver_create_file(&megasas_pci_driver.driver,
9028*4882a593Smuzhiyun &driver_attr_support_device_change);
9029*4882a593Smuzhiyun if (rval)
9030*4882a593Smuzhiyun goto err_dcf_support_device_change;
9031*4882a593Smuzhiyun
9032*4882a593Smuzhiyun rval = driver_create_file(&megasas_pci_driver.driver,
9033*4882a593Smuzhiyun &driver_attr_support_nvme_encapsulation);
9034*4882a593Smuzhiyun if (rval)
9035*4882a593Smuzhiyun goto err_dcf_support_nvme_encapsulation;
9036*4882a593Smuzhiyun
9037*4882a593Smuzhiyun rval = driver_create_file(&megasas_pci_driver.driver,
9038*4882a593Smuzhiyun &driver_attr_support_pci_lane_margining);
9039*4882a593Smuzhiyun if (rval)
9040*4882a593Smuzhiyun goto err_dcf_support_pci_lane_margining;
9041*4882a593Smuzhiyun
9042*4882a593Smuzhiyun return rval;
9043*4882a593Smuzhiyun
9044*4882a593Smuzhiyun err_dcf_support_pci_lane_margining:
9045*4882a593Smuzhiyun driver_remove_file(&megasas_pci_driver.driver,
9046*4882a593Smuzhiyun &driver_attr_support_nvme_encapsulation);
9047*4882a593Smuzhiyun
9048*4882a593Smuzhiyun err_dcf_support_nvme_encapsulation:
9049*4882a593Smuzhiyun driver_remove_file(&megasas_pci_driver.driver,
9050*4882a593Smuzhiyun &driver_attr_support_device_change);
9051*4882a593Smuzhiyun
9052*4882a593Smuzhiyun err_dcf_support_device_change:
9053*4882a593Smuzhiyun driver_remove_file(&megasas_pci_driver.driver,
9054*4882a593Smuzhiyun &driver_attr_dbg_lvl);
9055*4882a593Smuzhiyun err_dcf_dbg_lvl:
9056*4882a593Smuzhiyun driver_remove_file(&megasas_pci_driver.driver,
9057*4882a593Smuzhiyun &driver_attr_support_poll_for_event);
9058*4882a593Smuzhiyun err_dcf_support_poll_for_event:
9059*4882a593Smuzhiyun driver_remove_file(&megasas_pci_driver.driver,
9060*4882a593Smuzhiyun &driver_attr_release_date);
9061*4882a593Smuzhiyun err_dcf_rel_date:
9062*4882a593Smuzhiyun driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
9063*4882a593Smuzhiyun err_dcf_attr_ver:
9064*4882a593Smuzhiyun pci_unregister_driver(&megasas_pci_driver);
9065*4882a593Smuzhiyun err_pcidrv:
9066*4882a593Smuzhiyun megasas_exit_debugfs();
9067*4882a593Smuzhiyun unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
9068*4882a593Smuzhiyun return rval;
9069*4882a593Smuzhiyun }
9070*4882a593Smuzhiyun
9071*4882a593Smuzhiyun /**
9072*4882a593Smuzhiyun * megasas_exit - Driver unload entry point
9073*4882a593Smuzhiyun */
9074*4882a593Smuzhiyun static void __exit megasas_exit(void)
9075*4882a593Smuzhiyun {
9076*4882a593Smuzhiyun driver_remove_file(&megasas_pci_driver.driver,
9077*4882a593Smuzhiyun &driver_attr_dbg_lvl);
9078*4882a593Smuzhiyun driver_remove_file(&megasas_pci_driver.driver,
9079*4882a593Smuzhiyun &driver_attr_support_poll_for_event);
9080*4882a593Smuzhiyun driver_remove_file(&megasas_pci_driver.driver,
9081*4882a593Smuzhiyun &driver_attr_support_device_change);
9082*4882a593Smuzhiyun driver_remove_file(&megasas_pci_driver.driver,
9083*4882a593Smuzhiyun &driver_attr_release_date);
9084*4882a593Smuzhiyun driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
9085*4882a593Smuzhiyun driver_remove_file(&megasas_pci_driver.driver,
9086*4882a593Smuzhiyun &driver_attr_support_nvme_encapsulation);
9087*4882a593Smuzhiyun driver_remove_file(&megasas_pci_driver.driver,
9088*4882a593Smuzhiyun &driver_attr_support_pci_lane_margining);
9089*4882a593Smuzhiyun
9090*4882a593Smuzhiyun pci_unregister_driver(&megasas_pci_driver);
9091*4882a593Smuzhiyun megasas_exit_debugfs();
9092*4882a593Smuzhiyun unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
9093*4882a593Smuzhiyun }
9094*4882a593Smuzhiyun
9095*4882a593Smuzhiyun module_init(megasas_init);
9096*4882a593Smuzhiyun module_exit(megasas_exit);
9097