xref: /OK3568_Linux_fs/kernel/drivers/scsi/bfa/bfa_ioc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
4*4882a593Smuzhiyun  * Copyright (c) 2014- QLogic Corporation.
5*4882a593Smuzhiyun  * All rights reserved
6*4882a593Smuzhiyun  * www.qlogic.com
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Linux driver for QLogic BR-series Fibre Channel Host Bus Adapter.
9*4882a593Smuzhiyun  */
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include "bfad_drv.h"
12*4882a593Smuzhiyun #include "bfad_im.h"
13*4882a593Smuzhiyun #include "bfa_ioc.h"
14*4882a593Smuzhiyun #include "bfi_reg.h"
15*4882a593Smuzhiyun #include "bfa_defs.h"
16*4882a593Smuzhiyun #include "bfa_defs_svc.h"
17*4882a593Smuzhiyun #include "bfi.h"
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun BFA_TRC_FILE(CNA, IOC);
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun /*
22*4882a593Smuzhiyun  * IOC local definitions
23*4882a593Smuzhiyun  */
24*4882a593Smuzhiyun #define BFA_IOC_TOV		3000	/* msecs */
25*4882a593Smuzhiyun #define BFA_IOC_HWSEM_TOV	500	/* msecs */
26*4882a593Smuzhiyun #define BFA_IOC_HB_TOV		500	/* msecs */
27*4882a593Smuzhiyun #define BFA_IOC_TOV_RECOVER	 BFA_IOC_HB_TOV
28*4882a593Smuzhiyun #define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun #define bfa_ioc_timer_start(__ioc)					\
31*4882a593Smuzhiyun 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
32*4882a593Smuzhiyun 			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
33*4882a593Smuzhiyun #define bfa_ioc_timer_stop(__ioc)   bfa_timer_stop(&(__ioc)->ioc_timer)
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun #define bfa_hb_timer_start(__ioc)					\
36*4882a593Smuzhiyun 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,		\
37*4882a593Smuzhiyun 			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
38*4882a593Smuzhiyun #define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun #define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun #define bfa_ioc_state_disabled(__sm)		\
43*4882a593Smuzhiyun 	(((__sm) == BFI_IOC_UNINIT) ||		\
44*4882a593Smuzhiyun 	((__sm) == BFI_IOC_INITING) ||		\
45*4882a593Smuzhiyun 	((__sm) == BFI_IOC_HWINIT) ||		\
46*4882a593Smuzhiyun 	((__sm) == BFI_IOC_DISABLED) ||		\
47*4882a593Smuzhiyun 	((__sm) == BFI_IOC_FAIL) ||		\
48*4882a593Smuzhiyun 	((__sm) == BFI_IOC_CFG_DISABLED))
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun /*
51*4882a593Smuzhiyun  * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
52*4882a593Smuzhiyun  */
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun #define bfa_ioc_firmware_lock(__ioc)			\
55*4882a593Smuzhiyun 			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
56*4882a593Smuzhiyun #define bfa_ioc_firmware_unlock(__ioc)			\
57*4882a593Smuzhiyun 			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
58*4882a593Smuzhiyun #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
59*4882a593Smuzhiyun #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
60*4882a593Smuzhiyun #define bfa_ioc_notify_fail(__ioc)              \
61*4882a593Smuzhiyun 			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
62*4882a593Smuzhiyun #define bfa_ioc_sync_start(__ioc)               \
63*4882a593Smuzhiyun 			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
64*4882a593Smuzhiyun #define bfa_ioc_sync_join(__ioc)                \
65*4882a593Smuzhiyun 			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
66*4882a593Smuzhiyun #define bfa_ioc_sync_leave(__ioc)               \
67*4882a593Smuzhiyun 			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
68*4882a593Smuzhiyun #define bfa_ioc_sync_ack(__ioc)                 \
69*4882a593Smuzhiyun 			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
70*4882a593Smuzhiyun #define bfa_ioc_sync_complete(__ioc)            \
71*4882a593Smuzhiyun 			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
72*4882a593Smuzhiyun #define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)		\
73*4882a593Smuzhiyun 			((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
74*4882a593Smuzhiyun #define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
75*4882a593Smuzhiyun 			((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
76*4882a593Smuzhiyun #define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)		\
77*4882a593Smuzhiyun 		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
78*4882a593Smuzhiyun #define bfa_ioc_get_alt_ioc_fwstate(__ioc)		\
79*4882a593Smuzhiyun 			((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun #define bfa_ioc_mbox_cmd_pending(__ioc)		\
82*4882a593Smuzhiyun 			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
83*4882a593Smuzhiyun 			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun bfa_boolean_t bfa_auto_recover = BFA_TRUE;
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun /*
88*4882a593Smuzhiyun  * forward declarations
89*4882a593Smuzhiyun  */
90*4882a593Smuzhiyun static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
91*4882a593Smuzhiyun static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
92*4882a593Smuzhiyun static void bfa_ioc_timeout(void *ioc);
93*4882a593Smuzhiyun static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
94*4882a593Smuzhiyun static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
95*4882a593Smuzhiyun static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
96*4882a593Smuzhiyun static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
97*4882a593Smuzhiyun static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
98*4882a593Smuzhiyun static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
99*4882a593Smuzhiyun static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
100*4882a593Smuzhiyun static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
101*4882a593Smuzhiyun static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc ,
102*4882a593Smuzhiyun 				enum bfa_ioc_event_e event);
103*4882a593Smuzhiyun static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
104*4882a593Smuzhiyun static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
105*4882a593Smuzhiyun static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
106*4882a593Smuzhiyun static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
107*4882a593Smuzhiyun static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp(
108*4882a593Smuzhiyun 				struct bfi_ioc_image_hdr_s *base_fwhdr,
109*4882a593Smuzhiyun 				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp);
110*4882a593Smuzhiyun static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp(
111*4882a593Smuzhiyun 				struct bfa_ioc_s *ioc,
112*4882a593Smuzhiyun 				struct bfi_ioc_image_hdr_s *base_fwhdr);
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun /*
115*4882a593Smuzhiyun  * IOC state machine definitions/declarations
116*4882a593Smuzhiyun  */
117*4882a593Smuzhiyun enum ioc_event {
118*4882a593Smuzhiyun 	IOC_E_RESET		= 1,	/*  IOC reset request		*/
119*4882a593Smuzhiyun 	IOC_E_ENABLE		= 2,	/*  IOC enable request		*/
120*4882a593Smuzhiyun 	IOC_E_DISABLE		= 3,	/*  IOC disable request	*/
121*4882a593Smuzhiyun 	IOC_E_DETACH		= 4,	/*  driver detach cleanup	*/
122*4882a593Smuzhiyun 	IOC_E_ENABLED		= 5,	/*  f/w enabled		*/
123*4882a593Smuzhiyun 	IOC_E_FWRSP_GETATTR	= 6,	/*  IOC get attribute response	*/
124*4882a593Smuzhiyun 	IOC_E_DISABLED		= 7,	/*  f/w disabled		*/
125*4882a593Smuzhiyun 	IOC_E_PFFAILED		= 8,	/*  failure notice by iocpf sm	*/
126*4882a593Smuzhiyun 	IOC_E_HBFAIL		= 9,	/*  heartbeat failure		*/
127*4882a593Smuzhiyun 	IOC_E_HWERROR		= 10,	/*  hardware error interrupt	*/
128*4882a593Smuzhiyun 	IOC_E_TIMEOUT		= 11,	/*  timeout			*/
129*4882a593Smuzhiyun 	IOC_E_HWFAILED		= 12,	/*  PCI mapping failure notice	*/
130*4882a593Smuzhiyun };
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
133*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
134*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
135*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
136*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
137*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
138*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
139*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
140*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
141*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun static struct bfa_sm_table_s ioc_sm_table[] = {
144*4882a593Smuzhiyun 	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
145*4882a593Smuzhiyun 	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
146*4882a593Smuzhiyun 	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
147*4882a593Smuzhiyun 	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
148*4882a593Smuzhiyun 	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
149*4882a593Smuzhiyun 	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
150*4882a593Smuzhiyun 	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
151*4882a593Smuzhiyun 	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
152*4882a593Smuzhiyun 	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
153*4882a593Smuzhiyun 	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
154*4882a593Smuzhiyun };
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun /*
157*4882a593Smuzhiyun  * IOCPF state machine definitions/declarations
158*4882a593Smuzhiyun  */
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun #define bfa_iocpf_timer_start(__ioc)					\
161*4882a593Smuzhiyun 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
162*4882a593Smuzhiyun 			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
163*4882a593Smuzhiyun #define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun #define bfa_iocpf_poll_timer_start(__ioc)				\
166*4882a593Smuzhiyun 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
167*4882a593Smuzhiyun 			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun #define bfa_sem_timer_start(__ioc)					\
170*4882a593Smuzhiyun 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
171*4882a593Smuzhiyun 			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
172*4882a593Smuzhiyun #define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun /*
175*4882a593Smuzhiyun  * Forward declareations for iocpf state machine
176*4882a593Smuzhiyun  */
177*4882a593Smuzhiyun static void bfa_iocpf_timeout(void *ioc_arg);
178*4882a593Smuzhiyun static void bfa_iocpf_sem_timeout(void *ioc_arg);
179*4882a593Smuzhiyun static void bfa_iocpf_poll_timeout(void *ioc_arg);
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun /*
182*4882a593Smuzhiyun  * IOCPF state machine events
183*4882a593Smuzhiyun  */
184*4882a593Smuzhiyun enum iocpf_event {
185*4882a593Smuzhiyun 	IOCPF_E_ENABLE		= 1,	/*  IOCPF enable request	*/
186*4882a593Smuzhiyun 	IOCPF_E_DISABLE		= 2,	/*  IOCPF disable request	*/
187*4882a593Smuzhiyun 	IOCPF_E_STOP		= 3,	/*  stop on driver detach	*/
188*4882a593Smuzhiyun 	IOCPF_E_FWREADY		= 4,	/*  f/w initialization done	*/
189*4882a593Smuzhiyun 	IOCPF_E_FWRSP_ENABLE	= 5,	/*  enable f/w response	*/
190*4882a593Smuzhiyun 	IOCPF_E_FWRSP_DISABLE	= 6,	/*  disable f/w response	*/
191*4882a593Smuzhiyun 	IOCPF_E_FAIL		= 7,	/*  failure notice by ioc sm	*/
192*4882a593Smuzhiyun 	IOCPF_E_INITFAIL	= 8,	/*  init fail notice by ioc sm	*/
193*4882a593Smuzhiyun 	IOCPF_E_GETATTRFAIL	= 9,	/*  init fail notice by ioc sm	*/
194*4882a593Smuzhiyun 	IOCPF_E_SEMLOCKED	= 10,	/*  h/w semaphore is locked	*/
195*4882a593Smuzhiyun 	IOCPF_E_TIMEOUT		= 11,	/*  f/w response timeout	*/
196*4882a593Smuzhiyun 	IOCPF_E_SEM_ERROR	= 12,	/*  h/w sem mapping error	*/
197*4882a593Smuzhiyun };
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun /*
200*4882a593Smuzhiyun  * IOCPF states
201*4882a593Smuzhiyun  */
202*4882a593Smuzhiyun enum bfa_iocpf_state {
203*4882a593Smuzhiyun 	BFA_IOCPF_RESET		= 1,	/*  IOC is in reset state */
204*4882a593Smuzhiyun 	BFA_IOCPF_SEMWAIT	= 2,	/*  Waiting for IOC h/w semaphore */
205*4882a593Smuzhiyun 	BFA_IOCPF_HWINIT	= 3,	/*  IOC h/w is being initialized */
206*4882a593Smuzhiyun 	BFA_IOCPF_READY		= 4,	/*  IOCPF is initialized */
207*4882a593Smuzhiyun 	BFA_IOCPF_INITFAIL	= 5,	/*  IOCPF failed */
208*4882a593Smuzhiyun 	BFA_IOCPF_FAIL		= 6,	/*  IOCPF failed */
209*4882a593Smuzhiyun 	BFA_IOCPF_DISABLING	= 7,	/*  IOCPF is being disabled */
210*4882a593Smuzhiyun 	BFA_IOCPF_DISABLED	= 8,	/*  IOCPF is disabled */
211*4882a593Smuzhiyun 	BFA_IOCPF_FWMISMATCH	= 9,	/*  IOC f/w different from drivers */
212*4882a593Smuzhiyun };
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
215*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
216*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
217*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
218*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
219*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
220*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
221*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
222*4882a593Smuzhiyun 						enum iocpf_event);
223*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
224*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
225*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
226*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
227*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
228*4882a593Smuzhiyun 						enum iocpf_event);
229*4882a593Smuzhiyun bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
230*4882a593Smuzhiyun 
231*4882a593Smuzhiyun static struct bfa_sm_table_s iocpf_sm_table[] = {
232*4882a593Smuzhiyun 	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
233*4882a593Smuzhiyun 	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
234*4882a593Smuzhiyun 	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
235*4882a593Smuzhiyun 	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
236*4882a593Smuzhiyun 	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
237*4882a593Smuzhiyun 	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
238*4882a593Smuzhiyun 	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
239*4882a593Smuzhiyun 	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
240*4882a593Smuzhiyun 	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
241*4882a593Smuzhiyun 	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
242*4882a593Smuzhiyun 	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
243*4882a593Smuzhiyun 	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
244*4882a593Smuzhiyun 	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
245*4882a593Smuzhiyun 	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
246*4882a593Smuzhiyun };
247*4882a593Smuzhiyun 
248*4882a593Smuzhiyun /*
249*4882a593Smuzhiyun  * IOC State Machine
250*4882a593Smuzhiyun  */
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun /*
253*4882a593Smuzhiyun  * Beginning state. IOC uninit state.
254*4882a593Smuzhiyun  */
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s * ioc)257*4882a593Smuzhiyun bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
258*4882a593Smuzhiyun {
259*4882a593Smuzhiyun }
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun /*
262*4882a593Smuzhiyun  * IOC is in uninit state.
263*4882a593Smuzhiyun  */
264*4882a593Smuzhiyun static void
bfa_ioc_sm_uninit(struct bfa_ioc_s * ioc,enum ioc_event event)265*4882a593Smuzhiyun bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
266*4882a593Smuzhiyun {
267*4882a593Smuzhiyun 	bfa_trc(ioc, event);
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	switch (event) {
270*4882a593Smuzhiyun 	case IOC_E_RESET:
271*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
272*4882a593Smuzhiyun 		break;
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun 	default:
275*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
276*4882a593Smuzhiyun 	}
277*4882a593Smuzhiyun }
278*4882a593Smuzhiyun /*
279*4882a593Smuzhiyun  * Reset entry actions -- initialize state machine
280*4882a593Smuzhiyun  */
281*4882a593Smuzhiyun static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s * ioc)282*4882a593Smuzhiyun bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
283*4882a593Smuzhiyun {
284*4882a593Smuzhiyun 	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
285*4882a593Smuzhiyun }
286*4882a593Smuzhiyun 
287*4882a593Smuzhiyun /*
288*4882a593Smuzhiyun  * IOC is in reset state.
289*4882a593Smuzhiyun  */
290*4882a593Smuzhiyun static void
bfa_ioc_sm_reset(struct bfa_ioc_s * ioc,enum ioc_event event)291*4882a593Smuzhiyun bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
292*4882a593Smuzhiyun {
293*4882a593Smuzhiyun 	bfa_trc(ioc, event);
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 	switch (event) {
296*4882a593Smuzhiyun 	case IOC_E_ENABLE:
297*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
298*4882a593Smuzhiyun 		break;
299*4882a593Smuzhiyun 
300*4882a593Smuzhiyun 	case IOC_E_DISABLE:
301*4882a593Smuzhiyun 		bfa_ioc_disable_comp(ioc);
302*4882a593Smuzhiyun 		break;
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun 	case IOC_E_DETACH:
305*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
306*4882a593Smuzhiyun 		break;
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 	default:
309*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
310*4882a593Smuzhiyun 	}
311*4882a593Smuzhiyun }
312*4882a593Smuzhiyun 
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s * ioc)315*4882a593Smuzhiyun bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
316*4882a593Smuzhiyun {
317*4882a593Smuzhiyun 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
318*4882a593Smuzhiyun }
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun /*
321*4882a593Smuzhiyun  * Host IOC function is being enabled, awaiting response from firmware.
322*4882a593Smuzhiyun  * Semaphore is acquired.
323*4882a593Smuzhiyun  */
324*4882a593Smuzhiyun static void
bfa_ioc_sm_enabling(struct bfa_ioc_s * ioc,enum ioc_event event)325*4882a593Smuzhiyun bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
326*4882a593Smuzhiyun {
327*4882a593Smuzhiyun 	bfa_trc(ioc, event);
328*4882a593Smuzhiyun 
329*4882a593Smuzhiyun 	switch (event) {
330*4882a593Smuzhiyun 	case IOC_E_ENABLED:
331*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
332*4882a593Smuzhiyun 		break;
333*4882a593Smuzhiyun 
334*4882a593Smuzhiyun 	case IOC_E_PFFAILED:
335*4882a593Smuzhiyun 		/* !!! fall through !!! */
336*4882a593Smuzhiyun 	case IOC_E_HWERROR:
337*4882a593Smuzhiyun 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
338*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
339*4882a593Smuzhiyun 		if (event != IOC_E_PFFAILED)
340*4882a593Smuzhiyun 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
341*4882a593Smuzhiyun 		break;
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun 	case IOC_E_HWFAILED:
344*4882a593Smuzhiyun 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
345*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
346*4882a593Smuzhiyun 		break;
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun 	case IOC_E_DISABLE:
349*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
350*4882a593Smuzhiyun 		break;
351*4882a593Smuzhiyun 
352*4882a593Smuzhiyun 	case IOC_E_DETACH:
353*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
354*4882a593Smuzhiyun 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
355*4882a593Smuzhiyun 		break;
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun 	case IOC_E_ENABLE:
358*4882a593Smuzhiyun 		break;
359*4882a593Smuzhiyun 
360*4882a593Smuzhiyun 	default:
361*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
362*4882a593Smuzhiyun 	}
363*4882a593Smuzhiyun }
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun 
366*4882a593Smuzhiyun static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s * ioc)367*4882a593Smuzhiyun bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
368*4882a593Smuzhiyun {
369*4882a593Smuzhiyun 	bfa_ioc_timer_start(ioc);
370*4882a593Smuzhiyun 	bfa_ioc_send_getattr(ioc);
371*4882a593Smuzhiyun }
372*4882a593Smuzhiyun 
373*4882a593Smuzhiyun /*
374*4882a593Smuzhiyun  * IOC configuration in progress. Timer is active.
375*4882a593Smuzhiyun  */
376*4882a593Smuzhiyun static void
bfa_ioc_sm_getattr(struct bfa_ioc_s * ioc,enum ioc_event event)377*4882a593Smuzhiyun bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
378*4882a593Smuzhiyun {
379*4882a593Smuzhiyun 	bfa_trc(ioc, event);
380*4882a593Smuzhiyun 
381*4882a593Smuzhiyun 	switch (event) {
382*4882a593Smuzhiyun 	case IOC_E_FWRSP_GETATTR:
383*4882a593Smuzhiyun 		bfa_ioc_timer_stop(ioc);
384*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
385*4882a593Smuzhiyun 		break;
386*4882a593Smuzhiyun 
387*4882a593Smuzhiyun 	case IOC_E_PFFAILED:
388*4882a593Smuzhiyun 	case IOC_E_HWERROR:
389*4882a593Smuzhiyun 		bfa_ioc_timer_stop(ioc);
390*4882a593Smuzhiyun 		/* !!! fall through !!! */
391*4882a593Smuzhiyun 	case IOC_E_TIMEOUT:
392*4882a593Smuzhiyun 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
393*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
394*4882a593Smuzhiyun 		if (event != IOC_E_PFFAILED)
395*4882a593Smuzhiyun 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
396*4882a593Smuzhiyun 		break;
397*4882a593Smuzhiyun 
398*4882a593Smuzhiyun 	case IOC_E_DISABLE:
399*4882a593Smuzhiyun 		bfa_ioc_timer_stop(ioc);
400*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
401*4882a593Smuzhiyun 		break;
402*4882a593Smuzhiyun 
403*4882a593Smuzhiyun 	case IOC_E_ENABLE:
404*4882a593Smuzhiyun 		break;
405*4882a593Smuzhiyun 
406*4882a593Smuzhiyun 	default:
407*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
408*4882a593Smuzhiyun 	}
409*4882a593Smuzhiyun }
410*4882a593Smuzhiyun 
411*4882a593Smuzhiyun static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s * ioc)412*4882a593Smuzhiyun bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
413*4882a593Smuzhiyun {
414*4882a593Smuzhiyun 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
417*4882a593Smuzhiyun 	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
418*4882a593Smuzhiyun 	bfa_ioc_hb_monitor(ioc);
419*4882a593Smuzhiyun 	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
420*4882a593Smuzhiyun 	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
421*4882a593Smuzhiyun }
422*4882a593Smuzhiyun 
423*4882a593Smuzhiyun static void
bfa_ioc_sm_op(struct bfa_ioc_s * ioc,enum ioc_event event)424*4882a593Smuzhiyun bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
425*4882a593Smuzhiyun {
426*4882a593Smuzhiyun 	bfa_trc(ioc, event);
427*4882a593Smuzhiyun 
428*4882a593Smuzhiyun 	switch (event) {
429*4882a593Smuzhiyun 	case IOC_E_ENABLE:
430*4882a593Smuzhiyun 		break;
431*4882a593Smuzhiyun 
432*4882a593Smuzhiyun 	case IOC_E_DISABLE:
433*4882a593Smuzhiyun 		bfa_hb_timer_stop(ioc);
434*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
435*4882a593Smuzhiyun 		break;
436*4882a593Smuzhiyun 
437*4882a593Smuzhiyun 	case IOC_E_PFFAILED:
438*4882a593Smuzhiyun 	case IOC_E_HWERROR:
439*4882a593Smuzhiyun 		bfa_hb_timer_stop(ioc);
440*4882a593Smuzhiyun 		/* !!! fall through !!! */
441*4882a593Smuzhiyun 	case IOC_E_HBFAIL:
442*4882a593Smuzhiyun 		if (ioc->iocpf.auto_recover)
443*4882a593Smuzhiyun 			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
444*4882a593Smuzhiyun 		else
445*4882a593Smuzhiyun 			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
446*4882a593Smuzhiyun 
447*4882a593Smuzhiyun 		bfa_ioc_fail_notify(ioc);
448*4882a593Smuzhiyun 
449*4882a593Smuzhiyun 		if (event != IOC_E_PFFAILED)
450*4882a593Smuzhiyun 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
451*4882a593Smuzhiyun 		break;
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun 	default:
454*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
455*4882a593Smuzhiyun 	}
456*4882a593Smuzhiyun }
457*4882a593Smuzhiyun 
458*4882a593Smuzhiyun 
459*4882a593Smuzhiyun static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s * ioc)460*4882a593Smuzhiyun bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
461*4882a593Smuzhiyun {
462*4882a593Smuzhiyun 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
463*4882a593Smuzhiyun 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
464*4882a593Smuzhiyun 	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
465*4882a593Smuzhiyun 	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
466*4882a593Smuzhiyun }
467*4882a593Smuzhiyun 
468*4882a593Smuzhiyun /*
469*4882a593Smuzhiyun  * IOC is being disabled
470*4882a593Smuzhiyun  */
471*4882a593Smuzhiyun static void
bfa_ioc_sm_disabling(struct bfa_ioc_s * ioc,enum ioc_event event)472*4882a593Smuzhiyun bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
473*4882a593Smuzhiyun {
474*4882a593Smuzhiyun 	bfa_trc(ioc, event);
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun 	switch (event) {
477*4882a593Smuzhiyun 	case IOC_E_DISABLED:
478*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
479*4882a593Smuzhiyun 		break;
480*4882a593Smuzhiyun 
481*4882a593Smuzhiyun 	case IOC_E_HWERROR:
482*4882a593Smuzhiyun 		/*
483*4882a593Smuzhiyun 		 * No state change.  Will move to disabled state
484*4882a593Smuzhiyun 		 * after iocpf sm completes failure processing and
485*4882a593Smuzhiyun 		 * moves to disabled state.
486*4882a593Smuzhiyun 		 */
487*4882a593Smuzhiyun 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
488*4882a593Smuzhiyun 		break;
489*4882a593Smuzhiyun 
490*4882a593Smuzhiyun 	case IOC_E_HWFAILED:
491*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
492*4882a593Smuzhiyun 		bfa_ioc_disable_comp(ioc);
493*4882a593Smuzhiyun 		break;
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun 	default:
496*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
497*4882a593Smuzhiyun 	}
498*4882a593Smuzhiyun }
499*4882a593Smuzhiyun 
500*4882a593Smuzhiyun /*
501*4882a593Smuzhiyun  * IOC disable completion entry.
502*4882a593Smuzhiyun  */
503*4882a593Smuzhiyun static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s * ioc)504*4882a593Smuzhiyun bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
505*4882a593Smuzhiyun {
506*4882a593Smuzhiyun 	bfa_ioc_disable_comp(ioc);
507*4882a593Smuzhiyun }
508*4882a593Smuzhiyun 
509*4882a593Smuzhiyun static void
bfa_ioc_sm_disabled(struct bfa_ioc_s * ioc,enum ioc_event event)510*4882a593Smuzhiyun bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
511*4882a593Smuzhiyun {
512*4882a593Smuzhiyun 	bfa_trc(ioc, event);
513*4882a593Smuzhiyun 
514*4882a593Smuzhiyun 	switch (event) {
515*4882a593Smuzhiyun 	case IOC_E_ENABLE:
516*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
517*4882a593Smuzhiyun 		break;
518*4882a593Smuzhiyun 
519*4882a593Smuzhiyun 	case IOC_E_DISABLE:
520*4882a593Smuzhiyun 		ioc->cbfn->disable_cbfn(ioc->bfa);
521*4882a593Smuzhiyun 		break;
522*4882a593Smuzhiyun 
523*4882a593Smuzhiyun 	case IOC_E_DETACH:
524*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
525*4882a593Smuzhiyun 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
526*4882a593Smuzhiyun 		break;
527*4882a593Smuzhiyun 
528*4882a593Smuzhiyun 	default:
529*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
530*4882a593Smuzhiyun 	}
531*4882a593Smuzhiyun }
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun 
534*4882a593Smuzhiyun static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s * ioc)535*4882a593Smuzhiyun bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
536*4882a593Smuzhiyun {
537*4882a593Smuzhiyun 	bfa_trc(ioc, 0);
538*4882a593Smuzhiyun }
539*4882a593Smuzhiyun 
540*4882a593Smuzhiyun /*
541*4882a593Smuzhiyun  * Hardware initialization retry.
542*4882a593Smuzhiyun  */
543*4882a593Smuzhiyun static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s * ioc,enum ioc_event event)544*4882a593Smuzhiyun bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
545*4882a593Smuzhiyun {
546*4882a593Smuzhiyun 	bfa_trc(ioc, event);
547*4882a593Smuzhiyun 
548*4882a593Smuzhiyun 	switch (event) {
549*4882a593Smuzhiyun 	case IOC_E_ENABLED:
550*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
551*4882a593Smuzhiyun 		break;
552*4882a593Smuzhiyun 
553*4882a593Smuzhiyun 	case IOC_E_PFFAILED:
554*4882a593Smuzhiyun 	case IOC_E_HWERROR:
555*4882a593Smuzhiyun 		/*
556*4882a593Smuzhiyun 		 * Initialization retry failed.
557*4882a593Smuzhiyun 		 */
558*4882a593Smuzhiyun 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
559*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
560*4882a593Smuzhiyun 		if (event != IOC_E_PFFAILED)
561*4882a593Smuzhiyun 			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
562*4882a593Smuzhiyun 		break;
563*4882a593Smuzhiyun 
564*4882a593Smuzhiyun 	case IOC_E_HWFAILED:
565*4882a593Smuzhiyun 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
566*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
567*4882a593Smuzhiyun 		break;
568*4882a593Smuzhiyun 
569*4882a593Smuzhiyun 	case IOC_E_ENABLE:
570*4882a593Smuzhiyun 		break;
571*4882a593Smuzhiyun 
572*4882a593Smuzhiyun 	case IOC_E_DISABLE:
573*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
574*4882a593Smuzhiyun 		break;
575*4882a593Smuzhiyun 
576*4882a593Smuzhiyun 	case IOC_E_DETACH:
577*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
578*4882a593Smuzhiyun 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
579*4882a593Smuzhiyun 		break;
580*4882a593Smuzhiyun 
581*4882a593Smuzhiyun 	default:
582*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
583*4882a593Smuzhiyun 	}
584*4882a593Smuzhiyun }
585*4882a593Smuzhiyun 
586*4882a593Smuzhiyun 
587*4882a593Smuzhiyun static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s * ioc)588*4882a593Smuzhiyun bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
589*4882a593Smuzhiyun {
590*4882a593Smuzhiyun 	bfa_trc(ioc, 0);
591*4882a593Smuzhiyun }
592*4882a593Smuzhiyun 
593*4882a593Smuzhiyun /*
594*4882a593Smuzhiyun  * IOC failure.
595*4882a593Smuzhiyun  */
596*4882a593Smuzhiyun static void
bfa_ioc_sm_fail(struct bfa_ioc_s * ioc,enum ioc_event event)597*4882a593Smuzhiyun bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
598*4882a593Smuzhiyun {
599*4882a593Smuzhiyun 	bfa_trc(ioc, event);
600*4882a593Smuzhiyun 
601*4882a593Smuzhiyun 	switch (event) {
602*4882a593Smuzhiyun 
603*4882a593Smuzhiyun 	case IOC_E_ENABLE:
604*4882a593Smuzhiyun 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
605*4882a593Smuzhiyun 		break;
606*4882a593Smuzhiyun 
607*4882a593Smuzhiyun 	case IOC_E_DISABLE:
608*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
609*4882a593Smuzhiyun 		break;
610*4882a593Smuzhiyun 
611*4882a593Smuzhiyun 	case IOC_E_DETACH:
612*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
613*4882a593Smuzhiyun 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
614*4882a593Smuzhiyun 		break;
615*4882a593Smuzhiyun 
616*4882a593Smuzhiyun 	case IOC_E_HWERROR:
617*4882a593Smuzhiyun 	case IOC_E_HWFAILED:
618*4882a593Smuzhiyun 		/*
619*4882a593Smuzhiyun 		 * HB failure / HW error notification, ignore.
620*4882a593Smuzhiyun 		 */
621*4882a593Smuzhiyun 		break;
622*4882a593Smuzhiyun 	default:
623*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
624*4882a593Smuzhiyun 	}
625*4882a593Smuzhiyun }
626*4882a593Smuzhiyun 
627*4882a593Smuzhiyun static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s * ioc)628*4882a593Smuzhiyun bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
629*4882a593Smuzhiyun {
630*4882a593Smuzhiyun 	bfa_trc(ioc, 0);
631*4882a593Smuzhiyun }
632*4882a593Smuzhiyun 
633*4882a593Smuzhiyun static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s * ioc,enum ioc_event event)634*4882a593Smuzhiyun bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
635*4882a593Smuzhiyun {
636*4882a593Smuzhiyun 	bfa_trc(ioc, event);
637*4882a593Smuzhiyun 
638*4882a593Smuzhiyun 	switch (event) {
639*4882a593Smuzhiyun 	case IOC_E_ENABLE:
640*4882a593Smuzhiyun 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
641*4882a593Smuzhiyun 		break;
642*4882a593Smuzhiyun 
643*4882a593Smuzhiyun 	case IOC_E_DISABLE:
644*4882a593Smuzhiyun 		ioc->cbfn->disable_cbfn(ioc->bfa);
645*4882a593Smuzhiyun 		break;
646*4882a593Smuzhiyun 
647*4882a593Smuzhiyun 	case IOC_E_DETACH:
648*4882a593Smuzhiyun 		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
649*4882a593Smuzhiyun 		break;
650*4882a593Smuzhiyun 
651*4882a593Smuzhiyun 	case IOC_E_HWERROR:
652*4882a593Smuzhiyun 		/* Ignore - already in hwfail state */
653*4882a593Smuzhiyun 		break;
654*4882a593Smuzhiyun 
655*4882a593Smuzhiyun 	default:
656*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
657*4882a593Smuzhiyun 	}
658*4882a593Smuzhiyun }
659*4882a593Smuzhiyun 
660*4882a593Smuzhiyun /*
661*4882a593Smuzhiyun  * IOCPF State Machine
662*4882a593Smuzhiyun  */
663*4882a593Smuzhiyun 
664*4882a593Smuzhiyun /*
665*4882a593Smuzhiyun  * Reset entry actions -- initialize state machine
666*4882a593Smuzhiyun  */
667*4882a593Smuzhiyun static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s * iocpf)668*4882a593Smuzhiyun bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
669*4882a593Smuzhiyun {
670*4882a593Smuzhiyun 	iocpf->fw_mismatch_notified = BFA_FALSE;
671*4882a593Smuzhiyun 	iocpf->auto_recover = bfa_auto_recover;
672*4882a593Smuzhiyun }
673*4882a593Smuzhiyun 
674*4882a593Smuzhiyun /*
675*4882a593Smuzhiyun  * Beginning state. IOC is in reset state.
676*4882a593Smuzhiyun  */
677*4882a593Smuzhiyun static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s * iocpf,enum iocpf_event event)678*4882a593Smuzhiyun bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
679*4882a593Smuzhiyun {
680*4882a593Smuzhiyun 	struct bfa_ioc_s *ioc = iocpf->ioc;
681*4882a593Smuzhiyun 
682*4882a593Smuzhiyun 	bfa_trc(ioc, event);
683*4882a593Smuzhiyun 
684*4882a593Smuzhiyun 	switch (event) {
685*4882a593Smuzhiyun 	case IOCPF_E_ENABLE:
686*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
687*4882a593Smuzhiyun 		break;
688*4882a593Smuzhiyun 
689*4882a593Smuzhiyun 	case IOCPF_E_STOP:
690*4882a593Smuzhiyun 		break;
691*4882a593Smuzhiyun 
692*4882a593Smuzhiyun 	default:
693*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
694*4882a593Smuzhiyun 	}
695*4882a593Smuzhiyun }
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun /*
698*4882a593Smuzhiyun  * Semaphore should be acquired for version check.
699*4882a593Smuzhiyun  */
700*4882a593Smuzhiyun static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s * iocpf)701*4882a593Smuzhiyun bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
702*4882a593Smuzhiyun {
703*4882a593Smuzhiyun 	struct bfi_ioc_image_hdr_s	fwhdr;
704*4882a593Smuzhiyun 	u32	r32, fwstate, pgnum, loff = 0;
705*4882a593Smuzhiyun 	int	i;
706*4882a593Smuzhiyun 
707*4882a593Smuzhiyun 	/*
708*4882a593Smuzhiyun 	 * Spin on init semaphore to serialize.
709*4882a593Smuzhiyun 	 */
710*4882a593Smuzhiyun 	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
711*4882a593Smuzhiyun 	while (r32 & 0x1) {
712*4882a593Smuzhiyun 		udelay(20);
713*4882a593Smuzhiyun 		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
714*4882a593Smuzhiyun 	}
715*4882a593Smuzhiyun 
716*4882a593Smuzhiyun 	/* h/w sem init */
717*4882a593Smuzhiyun 	fwstate = bfa_ioc_get_cur_ioc_fwstate(iocpf->ioc);
718*4882a593Smuzhiyun 	if (fwstate == BFI_IOC_UNINIT) {
719*4882a593Smuzhiyun 		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
720*4882a593Smuzhiyun 		goto sem_get;
721*4882a593Smuzhiyun 	}
722*4882a593Smuzhiyun 
723*4882a593Smuzhiyun 	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);
724*4882a593Smuzhiyun 
725*4882a593Smuzhiyun 	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
726*4882a593Smuzhiyun 		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
727*4882a593Smuzhiyun 		goto sem_get;
728*4882a593Smuzhiyun 	}
729*4882a593Smuzhiyun 
730*4882a593Smuzhiyun 	/*
731*4882a593Smuzhiyun 	 * Clear fwver hdr
732*4882a593Smuzhiyun 	 */
733*4882a593Smuzhiyun 	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
734*4882a593Smuzhiyun 	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);
735*4882a593Smuzhiyun 
736*4882a593Smuzhiyun 	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
737*4882a593Smuzhiyun 		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
738*4882a593Smuzhiyun 		loff += sizeof(u32);
739*4882a593Smuzhiyun 	}
740*4882a593Smuzhiyun 
741*4882a593Smuzhiyun 	bfa_trc(iocpf->ioc, fwstate);
742*4882a593Smuzhiyun 	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
743*4882a593Smuzhiyun 	bfa_ioc_set_cur_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
744*4882a593Smuzhiyun 	bfa_ioc_set_alt_ioc_fwstate(iocpf->ioc, BFI_IOC_UNINIT);
745*4882a593Smuzhiyun 
746*4882a593Smuzhiyun 	/*
747*4882a593Smuzhiyun 	 * Unlock the hw semaphore. Should be here only once per boot.
748*4882a593Smuzhiyun 	 */
749*4882a593Smuzhiyun 	bfa_ioc_ownership_reset(iocpf->ioc);
750*4882a593Smuzhiyun 
751*4882a593Smuzhiyun 	/*
752*4882a593Smuzhiyun 	 * unlock init semaphore.
753*4882a593Smuzhiyun 	 */
754*4882a593Smuzhiyun 	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
755*4882a593Smuzhiyun 
756*4882a593Smuzhiyun sem_get:
757*4882a593Smuzhiyun 	bfa_ioc_hw_sem_get(iocpf->ioc);
758*4882a593Smuzhiyun }
759*4882a593Smuzhiyun 
760*4882a593Smuzhiyun /*
761*4882a593Smuzhiyun  * Awaiting h/w semaphore to continue with version check.
762*4882a593Smuzhiyun  */
763*4882a593Smuzhiyun static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s * iocpf,enum iocpf_event event)764*4882a593Smuzhiyun bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
765*4882a593Smuzhiyun {
766*4882a593Smuzhiyun 	struct bfa_ioc_s *ioc = iocpf->ioc;
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun 	bfa_trc(ioc, event);
769*4882a593Smuzhiyun 
770*4882a593Smuzhiyun 	switch (event) {
771*4882a593Smuzhiyun 	case IOCPF_E_SEMLOCKED:
772*4882a593Smuzhiyun 		if (bfa_ioc_firmware_lock(ioc)) {
773*4882a593Smuzhiyun 			if (bfa_ioc_sync_start(ioc)) {
774*4882a593Smuzhiyun 				bfa_ioc_sync_join(ioc);
775*4882a593Smuzhiyun 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
776*4882a593Smuzhiyun 			} else {
777*4882a593Smuzhiyun 				bfa_ioc_firmware_unlock(ioc);
778*4882a593Smuzhiyun 				writel(1, ioc->ioc_regs.ioc_sem_reg);
779*4882a593Smuzhiyun 				bfa_sem_timer_start(ioc);
780*4882a593Smuzhiyun 			}
781*4882a593Smuzhiyun 		} else {
782*4882a593Smuzhiyun 			writel(1, ioc->ioc_regs.ioc_sem_reg);
783*4882a593Smuzhiyun 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
784*4882a593Smuzhiyun 		}
785*4882a593Smuzhiyun 		break;
786*4882a593Smuzhiyun 
787*4882a593Smuzhiyun 	case IOCPF_E_SEM_ERROR:
788*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
789*4882a593Smuzhiyun 		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
790*4882a593Smuzhiyun 		break;
791*4882a593Smuzhiyun 
792*4882a593Smuzhiyun 	case IOCPF_E_DISABLE:
793*4882a593Smuzhiyun 		bfa_sem_timer_stop(ioc);
794*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
795*4882a593Smuzhiyun 		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
796*4882a593Smuzhiyun 		break;
797*4882a593Smuzhiyun 
798*4882a593Smuzhiyun 	case IOCPF_E_STOP:
799*4882a593Smuzhiyun 		bfa_sem_timer_stop(ioc);
800*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
801*4882a593Smuzhiyun 		break;
802*4882a593Smuzhiyun 
803*4882a593Smuzhiyun 	default:
804*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
805*4882a593Smuzhiyun 	}
806*4882a593Smuzhiyun }
807*4882a593Smuzhiyun 
808*4882a593Smuzhiyun /*
809*4882a593Smuzhiyun  * Notify enable completion callback.
810*4882a593Smuzhiyun  */
811*4882a593Smuzhiyun static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s * iocpf)812*4882a593Smuzhiyun bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
813*4882a593Smuzhiyun {
814*4882a593Smuzhiyun 	/*
815*4882a593Smuzhiyun 	 * Call only the first time sm enters fwmismatch state.
816*4882a593Smuzhiyun 	 */
817*4882a593Smuzhiyun 	if (iocpf->fw_mismatch_notified == BFA_FALSE)
818*4882a593Smuzhiyun 		bfa_ioc_pf_fwmismatch(iocpf->ioc);
819*4882a593Smuzhiyun 
820*4882a593Smuzhiyun 	iocpf->fw_mismatch_notified = BFA_TRUE;
821*4882a593Smuzhiyun 	bfa_iocpf_timer_start(iocpf->ioc);
822*4882a593Smuzhiyun }
823*4882a593Smuzhiyun 
824*4882a593Smuzhiyun /*
825*4882a593Smuzhiyun  * Awaiting firmware version match.
826*4882a593Smuzhiyun  */
827*4882a593Smuzhiyun static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s * iocpf,enum iocpf_event event)828*4882a593Smuzhiyun bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
829*4882a593Smuzhiyun {
830*4882a593Smuzhiyun 	struct bfa_ioc_s *ioc = iocpf->ioc;
831*4882a593Smuzhiyun 
832*4882a593Smuzhiyun 	bfa_trc(ioc, event);
833*4882a593Smuzhiyun 
834*4882a593Smuzhiyun 	switch (event) {
835*4882a593Smuzhiyun 	case IOCPF_E_TIMEOUT:
836*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
837*4882a593Smuzhiyun 		break;
838*4882a593Smuzhiyun 
839*4882a593Smuzhiyun 	case IOCPF_E_DISABLE:
840*4882a593Smuzhiyun 		bfa_iocpf_timer_stop(ioc);
841*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
842*4882a593Smuzhiyun 		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
843*4882a593Smuzhiyun 		break;
844*4882a593Smuzhiyun 
845*4882a593Smuzhiyun 	case IOCPF_E_STOP:
846*4882a593Smuzhiyun 		bfa_iocpf_timer_stop(ioc);
847*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
848*4882a593Smuzhiyun 		break;
849*4882a593Smuzhiyun 
850*4882a593Smuzhiyun 	default:
851*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
852*4882a593Smuzhiyun 	}
853*4882a593Smuzhiyun }
854*4882a593Smuzhiyun 
855*4882a593Smuzhiyun /*
856*4882a593Smuzhiyun  * Request for semaphore.
857*4882a593Smuzhiyun  */
858*4882a593Smuzhiyun static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s * iocpf)859*4882a593Smuzhiyun bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
860*4882a593Smuzhiyun {
861*4882a593Smuzhiyun 	bfa_ioc_hw_sem_get(iocpf->ioc);
862*4882a593Smuzhiyun }
863*4882a593Smuzhiyun 
864*4882a593Smuzhiyun /*
865*4882a593Smuzhiyun  * Awaiting semaphore for h/w initialzation.
866*4882a593Smuzhiyun  */
867*4882a593Smuzhiyun static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s * iocpf,enum iocpf_event event)868*4882a593Smuzhiyun bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
869*4882a593Smuzhiyun {
870*4882a593Smuzhiyun 	struct bfa_ioc_s *ioc = iocpf->ioc;
871*4882a593Smuzhiyun 
872*4882a593Smuzhiyun 	bfa_trc(ioc, event);
873*4882a593Smuzhiyun 
874*4882a593Smuzhiyun 	switch (event) {
875*4882a593Smuzhiyun 	case IOCPF_E_SEMLOCKED:
876*4882a593Smuzhiyun 		if (bfa_ioc_sync_complete(ioc)) {
877*4882a593Smuzhiyun 			bfa_ioc_sync_join(ioc);
878*4882a593Smuzhiyun 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
879*4882a593Smuzhiyun 		} else {
880*4882a593Smuzhiyun 			writel(1, ioc->ioc_regs.ioc_sem_reg);
881*4882a593Smuzhiyun 			bfa_sem_timer_start(ioc);
882*4882a593Smuzhiyun 		}
883*4882a593Smuzhiyun 		break;
884*4882a593Smuzhiyun 
885*4882a593Smuzhiyun 	case IOCPF_E_SEM_ERROR:
886*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
887*4882a593Smuzhiyun 		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
888*4882a593Smuzhiyun 		break;
889*4882a593Smuzhiyun 
890*4882a593Smuzhiyun 	case IOCPF_E_DISABLE:
891*4882a593Smuzhiyun 		bfa_sem_timer_stop(ioc);
892*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
893*4882a593Smuzhiyun 		break;
894*4882a593Smuzhiyun 
895*4882a593Smuzhiyun 	default:
896*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
897*4882a593Smuzhiyun 	}
898*4882a593Smuzhiyun }
899*4882a593Smuzhiyun 
900*4882a593Smuzhiyun static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s * iocpf)901*4882a593Smuzhiyun bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
902*4882a593Smuzhiyun {
903*4882a593Smuzhiyun 	iocpf->poll_time = 0;
904*4882a593Smuzhiyun 	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
905*4882a593Smuzhiyun }
906*4882a593Smuzhiyun 
907*4882a593Smuzhiyun /*
908*4882a593Smuzhiyun  * Hardware is being initialized. Interrupts are enabled.
909*4882a593Smuzhiyun  * Holding hardware semaphore lock.
910*4882a593Smuzhiyun  */
911*4882a593Smuzhiyun static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s * iocpf,enum iocpf_event event)912*4882a593Smuzhiyun bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
913*4882a593Smuzhiyun {
914*4882a593Smuzhiyun 	struct bfa_ioc_s *ioc = iocpf->ioc;
915*4882a593Smuzhiyun 
916*4882a593Smuzhiyun 	bfa_trc(ioc, event);
917*4882a593Smuzhiyun 
918*4882a593Smuzhiyun 	switch (event) {
919*4882a593Smuzhiyun 	case IOCPF_E_FWREADY:
920*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
921*4882a593Smuzhiyun 		break;
922*4882a593Smuzhiyun 
923*4882a593Smuzhiyun 	case IOCPF_E_TIMEOUT:
924*4882a593Smuzhiyun 		writel(1, ioc->ioc_regs.ioc_sem_reg);
925*4882a593Smuzhiyun 		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
926*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
927*4882a593Smuzhiyun 		break;
928*4882a593Smuzhiyun 
929*4882a593Smuzhiyun 	case IOCPF_E_DISABLE:
930*4882a593Smuzhiyun 		bfa_iocpf_timer_stop(ioc);
931*4882a593Smuzhiyun 		bfa_ioc_sync_leave(ioc);
932*4882a593Smuzhiyun 		writel(1, ioc->ioc_regs.ioc_sem_reg);
933*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
934*4882a593Smuzhiyun 		break;
935*4882a593Smuzhiyun 
936*4882a593Smuzhiyun 	default:
937*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
938*4882a593Smuzhiyun 	}
939*4882a593Smuzhiyun }
940*4882a593Smuzhiyun 
941*4882a593Smuzhiyun static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s * iocpf)942*4882a593Smuzhiyun bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
943*4882a593Smuzhiyun {
944*4882a593Smuzhiyun 	bfa_iocpf_timer_start(iocpf->ioc);
945*4882a593Smuzhiyun 	/*
946*4882a593Smuzhiyun 	 * Enable Interrupts before sending fw IOC ENABLE cmd.
947*4882a593Smuzhiyun 	 */
948*4882a593Smuzhiyun 	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
949*4882a593Smuzhiyun 	bfa_ioc_send_enable(iocpf->ioc);
950*4882a593Smuzhiyun }
951*4882a593Smuzhiyun 
952*4882a593Smuzhiyun /*
953*4882a593Smuzhiyun  * Host IOC function is being enabled, awaiting response from firmware.
954*4882a593Smuzhiyun  * Semaphore is acquired.
955*4882a593Smuzhiyun  */
956*4882a593Smuzhiyun static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s * iocpf,enum iocpf_event event)957*4882a593Smuzhiyun bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
958*4882a593Smuzhiyun {
959*4882a593Smuzhiyun 	struct bfa_ioc_s *ioc = iocpf->ioc;
960*4882a593Smuzhiyun 
961*4882a593Smuzhiyun 	bfa_trc(ioc, event);
962*4882a593Smuzhiyun 
963*4882a593Smuzhiyun 	switch (event) {
964*4882a593Smuzhiyun 	case IOCPF_E_FWRSP_ENABLE:
965*4882a593Smuzhiyun 		bfa_iocpf_timer_stop(ioc);
966*4882a593Smuzhiyun 		writel(1, ioc->ioc_regs.ioc_sem_reg);
967*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
968*4882a593Smuzhiyun 		break;
969*4882a593Smuzhiyun 
970*4882a593Smuzhiyun 	case IOCPF_E_INITFAIL:
971*4882a593Smuzhiyun 		bfa_iocpf_timer_stop(ioc);
972*4882a593Smuzhiyun 		fallthrough;
973*4882a593Smuzhiyun 
974*4882a593Smuzhiyun 	case IOCPF_E_TIMEOUT:
975*4882a593Smuzhiyun 		writel(1, ioc->ioc_regs.ioc_sem_reg);
976*4882a593Smuzhiyun 		if (event == IOCPF_E_TIMEOUT)
977*4882a593Smuzhiyun 			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
978*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
979*4882a593Smuzhiyun 		break;
980*4882a593Smuzhiyun 
981*4882a593Smuzhiyun 	case IOCPF_E_DISABLE:
982*4882a593Smuzhiyun 		bfa_iocpf_timer_stop(ioc);
983*4882a593Smuzhiyun 		writel(1, ioc->ioc_regs.ioc_sem_reg);
984*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
985*4882a593Smuzhiyun 		break;
986*4882a593Smuzhiyun 
987*4882a593Smuzhiyun 	default:
988*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
989*4882a593Smuzhiyun 	}
990*4882a593Smuzhiyun }
991*4882a593Smuzhiyun 
992*4882a593Smuzhiyun static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s * iocpf)993*4882a593Smuzhiyun bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
994*4882a593Smuzhiyun {
995*4882a593Smuzhiyun 	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
996*4882a593Smuzhiyun }
997*4882a593Smuzhiyun 
998*4882a593Smuzhiyun static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s * iocpf,enum iocpf_event event)999*4882a593Smuzhiyun bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1000*4882a593Smuzhiyun {
1001*4882a593Smuzhiyun 	struct bfa_ioc_s *ioc = iocpf->ioc;
1002*4882a593Smuzhiyun 
1003*4882a593Smuzhiyun 	bfa_trc(ioc, event);
1004*4882a593Smuzhiyun 
1005*4882a593Smuzhiyun 	switch (event) {
1006*4882a593Smuzhiyun 	case IOCPF_E_DISABLE:
1007*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
1008*4882a593Smuzhiyun 		break;
1009*4882a593Smuzhiyun 
1010*4882a593Smuzhiyun 	case IOCPF_E_GETATTRFAIL:
1011*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
1012*4882a593Smuzhiyun 		break;
1013*4882a593Smuzhiyun 
1014*4882a593Smuzhiyun 	case IOCPF_E_FAIL:
1015*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
1016*4882a593Smuzhiyun 		break;
1017*4882a593Smuzhiyun 
1018*4882a593Smuzhiyun 	default:
1019*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
1020*4882a593Smuzhiyun 	}
1021*4882a593Smuzhiyun }
1022*4882a593Smuzhiyun 
1023*4882a593Smuzhiyun static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s * iocpf)1024*4882a593Smuzhiyun bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
1025*4882a593Smuzhiyun {
1026*4882a593Smuzhiyun 	bfa_iocpf_timer_start(iocpf->ioc);
1027*4882a593Smuzhiyun 	bfa_ioc_send_disable(iocpf->ioc);
1028*4882a593Smuzhiyun }
1029*4882a593Smuzhiyun 
1030*4882a593Smuzhiyun /*
1031*4882a593Smuzhiyun  * IOC is being disabled
1032*4882a593Smuzhiyun  */
1033*4882a593Smuzhiyun static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s * iocpf,enum iocpf_event event)1034*4882a593Smuzhiyun bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1035*4882a593Smuzhiyun {
1036*4882a593Smuzhiyun 	struct bfa_ioc_s *ioc = iocpf->ioc;
1037*4882a593Smuzhiyun 
1038*4882a593Smuzhiyun 	bfa_trc(ioc, event);
1039*4882a593Smuzhiyun 
1040*4882a593Smuzhiyun 	switch (event) {
1041*4882a593Smuzhiyun 	case IOCPF_E_FWRSP_DISABLE:
1042*4882a593Smuzhiyun 		bfa_iocpf_timer_stop(ioc);
1043*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1044*4882a593Smuzhiyun 		break;
1045*4882a593Smuzhiyun 
1046*4882a593Smuzhiyun 	case IOCPF_E_FAIL:
1047*4882a593Smuzhiyun 		bfa_iocpf_timer_stop(ioc);
1048*4882a593Smuzhiyun 		fallthrough;
1049*4882a593Smuzhiyun 
1050*4882a593Smuzhiyun 	case IOCPF_E_TIMEOUT:
1051*4882a593Smuzhiyun 		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
1052*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1053*4882a593Smuzhiyun 		break;
1054*4882a593Smuzhiyun 
1055*4882a593Smuzhiyun 	case IOCPF_E_FWRSP_ENABLE:
1056*4882a593Smuzhiyun 		break;
1057*4882a593Smuzhiyun 
1058*4882a593Smuzhiyun 	default:
1059*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
1060*4882a593Smuzhiyun 	}
1061*4882a593Smuzhiyun }
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s * iocpf)1064*4882a593Smuzhiyun bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
1065*4882a593Smuzhiyun {
1066*4882a593Smuzhiyun 	bfa_ioc_hw_sem_get(iocpf->ioc);
1067*4882a593Smuzhiyun }
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun /*
1070*4882a593Smuzhiyun  * IOC hb ack request is being removed.
1071*4882a593Smuzhiyun  */
1072*4882a593Smuzhiyun static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s * iocpf,enum iocpf_event event)1073*4882a593Smuzhiyun bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1074*4882a593Smuzhiyun {
1075*4882a593Smuzhiyun 	struct bfa_ioc_s *ioc = iocpf->ioc;
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	bfa_trc(ioc, event);
1078*4882a593Smuzhiyun 
1079*4882a593Smuzhiyun 	switch (event) {
1080*4882a593Smuzhiyun 	case IOCPF_E_SEMLOCKED:
1081*4882a593Smuzhiyun 		bfa_ioc_sync_leave(ioc);
1082*4882a593Smuzhiyun 		writel(1, ioc->ioc_regs.ioc_sem_reg);
1083*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1084*4882a593Smuzhiyun 		break;
1085*4882a593Smuzhiyun 
1086*4882a593Smuzhiyun 	case IOCPF_E_SEM_ERROR:
1087*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1088*4882a593Smuzhiyun 		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1089*4882a593Smuzhiyun 		break;
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun 	case IOCPF_E_FAIL:
1092*4882a593Smuzhiyun 		break;
1093*4882a593Smuzhiyun 
1094*4882a593Smuzhiyun 	default:
1095*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
1096*4882a593Smuzhiyun 	}
1097*4882a593Smuzhiyun }
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun /*
1100*4882a593Smuzhiyun  * IOC disable completion entry.
1101*4882a593Smuzhiyun  */
1102*4882a593Smuzhiyun static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s * iocpf)1103*4882a593Smuzhiyun bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
1104*4882a593Smuzhiyun {
1105*4882a593Smuzhiyun 	bfa_ioc_mbox_flush(iocpf->ioc);
1106*4882a593Smuzhiyun 	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
1107*4882a593Smuzhiyun }
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s * iocpf,enum iocpf_event event)1110*4882a593Smuzhiyun bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1111*4882a593Smuzhiyun {
1112*4882a593Smuzhiyun 	struct bfa_ioc_s *ioc = iocpf->ioc;
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun 	bfa_trc(ioc, event);
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun 	switch (event) {
1117*4882a593Smuzhiyun 	case IOCPF_E_ENABLE:
1118*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1119*4882a593Smuzhiyun 		break;
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	case IOCPF_E_STOP:
1122*4882a593Smuzhiyun 		bfa_ioc_firmware_unlock(ioc);
1123*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1124*4882a593Smuzhiyun 		break;
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun 	default:
1127*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
1128*4882a593Smuzhiyun 	}
1129*4882a593Smuzhiyun }
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s * iocpf)1132*4882a593Smuzhiyun bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
1133*4882a593Smuzhiyun {
1134*4882a593Smuzhiyun 	bfa_ioc_debug_save_ftrc(iocpf->ioc);
1135*4882a593Smuzhiyun 	bfa_ioc_hw_sem_get(iocpf->ioc);
1136*4882a593Smuzhiyun }
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun /*
1139*4882a593Smuzhiyun  * Hardware initialization failed.
1140*4882a593Smuzhiyun  */
1141*4882a593Smuzhiyun static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s * iocpf,enum iocpf_event event)1142*4882a593Smuzhiyun bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1143*4882a593Smuzhiyun {
1144*4882a593Smuzhiyun 	struct bfa_ioc_s *ioc = iocpf->ioc;
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun 	bfa_trc(ioc, event);
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 	switch (event) {
1149*4882a593Smuzhiyun 	case IOCPF_E_SEMLOCKED:
1150*4882a593Smuzhiyun 		bfa_ioc_notify_fail(ioc);
1151*4882a593Smuzhiyun 		bfa_ioc_sync_leave(ioc);
1152*4882a593Smuzhiyun 		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
1153*4882a593Smuzhiyun 		writel(1, ioc->ioc_regs.ioc_sem_reg);
1154*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
1155*4882a593Smuzhiyun 		break;
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 	case IOCPF_E_SEM_ERROR:
1158*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1159*4882a593Smuzhiyun 		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1160*4882a593Smuzhiyun 		break;
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	case IOCPF_E_DISABLE:
1163*4882a593Smuzhiyun 		bfa_sem_timer_stop(ioc);
1164*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1165*4882a593Smuzhiyun 		break;
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 	case IOCPF_E_STOP:
1168*4882a593Smuzhiyun 		bfa_sem_timer_stop(ioc);
1169*4882a593Smuzhiyun 		bfa_ioc_firmware_unlock(ioc);
1170*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1171*4882a593Smuzhiyun 		break;
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun 	case IOCPF_E_FAIL:
1174*4882a593Smuzhiyun 		break;
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 	default:
1177*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
1178*4882a593Smuzhiyun 	}
1179*4882a593Smuzhiyun }
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s * iocpf)1182*4882a593Smuzhiyun bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
1183*4882a593Smuzhiyun {
1184*4882a593Smuzhiyun 	bfa_trc(iocpf->ioc, 0);
1185*4882a593Smuzhiyun }
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun /*
1188*4882a593Smuzhiyun  * Hardware initialization failed.
1189*4882a593Smuzhiyun  */
1190*4882a593Smuzhiyun static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s * iocpf,enum iocpf_event event)1191*4882a593Smuzhiyun bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1192*4882a593Smuzhiyun {
1193*4882a593Smuzhiyun 	struct bfa_ioc_s *ioc = iocpf->ioc;
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun 	bfa_trc(ioc, event);
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	switch (event) {
1198*4882a593Smuzhiyun 	case IOCPF_E_DISABLE:
1199*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1200*4882a593Smuzhiyun 		break;
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	case IOCPF_E_STOP:
1203*4882a593Smuzhiyun 		bfa_ioc_firmware_unlock(ioc);
1204*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1205*4882a593Smuzhiyun 		break;
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	default:
1208*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
1209*4882a593Smuzhiyun 	}
1210*4882a593Smuzhiyun }
1211*4882a593Smuzhiyun 
1212*4882a593Smuzhiyun static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s * iocpf)1213*4882a593Smuzhiyun bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
1214*4882a593Smuzhiyun {
1215*4882a593Smuzhiyun 	/*
1216*4882a593Smuzhiyun 	 * Mark IOC as failed in hardware and stop firmware.
1217*4882a593Smuzhiyun 	 */
1218*4882a593Smuzhiyun 	bfa_ioc_lpu_stop(iocpf->ioc);
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun 	/*
1221*4882a593Smuzhiyun 	 * Flush any queued up mailbox requests.
1222*4882a593Smuzhiyun 	 */
1223*4882a593Smuzhiyun 	bfa_ioc_mbox_flush(iocpf->ioc);
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 	bfa_ioc_hw_sem_get(iocpf->ioc);
1226*4882a593Smuzhiyun }
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s * iocpf,enum iocpf_event event)1229*4882a593Smuzhiyun bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1230*4882a593Smuzhiyun {
1231*4882a593Smuzhiyun 	struct bfa_ioc_s *ioc = iocpf->ioc;
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun 	bfa_trc(ioc, event);
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun 	switch (event) {
1236*4882a593Smuzhiyun 	case IOCPF_E_SEMLOCKED:
1237*4882a593Smuzhiyun 		bfa_ioc_sync_ack(ioc);
1238*4882a593Smuzhiyun 		bfa_ioc_notify_fail(ioc);
1239*4882a593Smuzhiyun 		if (!iocpf->auto_recover) {
1240*4882a593Smuzhiyun 			bfa_ioc_sync_leave(ioc);
1241*4882a593Smuzhiyun 			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
1242*4882a593Smuzhiyun 			writel(1, ioc->ioc_regs.ioc_sem_reg);
1243*4882a593Smuzhiyun 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1244*4882a593Smuzhiyun 		} else {
1245*4882a593Smuzhiyun 			if (bfa_ioc_sync_complete(ioc))
1246*4882a593Smuzhiyun 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1247*4882a593Smuzhiyun 			else {
1248*4882a593Smuzhiyun 				writel(1, ioc->ioc_regs.ioc_sem_reg);
1249*4882a593Smuzhiyun 				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1250*4882a593Smuzhiyun 			}
1251*4882a593Smuzhiyun 		}
1252*4882a593Smuzhiyun 		break;
1253*4882a593Smuzhiyun 
1254*4882a593Smuzhiyun 	case IOCPF_E_SEM_ERROR:
1255*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1256*4882a593Smuzhiyun 		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
1257*4882a593Smuzhiyun 		break;
1258*4882a593Smuzhiyun 
1259*4882a593Smuzhiyun 	case IOCPF_E_DISABLE:
1260*4882a593Smuzhiyun 		bfa_sem_timer_stop(ioc);
1261*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1262*4882a593Smuzhiyun 		break;
1263*4882a593Smuzhiyun 
1264*4882a593Smuzhiyun 	case IOCPF_E_FAIL:
1265*4882a593Smuzhiyun 		break;
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun 	default:
1268*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
1269*4882a593Smuzhiyun 	}
1270*4882a593Smuzhiyun }
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s * iocpf)1273*4882a593Smuzhiyun bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
1274*4882a593Smuzhiyun {
1275*4882a593Smuzhiyun 	bfa_trc(iocpf->ioc, 0);
1276*4882a593Smuzhiyun }
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun /*
1279*4882a593Smuzhiyun  * IOC is in failed state.
1280*4882a593Smuzhiyun  */
1281*4882a593Smuzhiyun static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s * iocpf,enum iocpf_event event)1282*4882a593Smuzhiyun bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
1283*4882a593Smuzhiyun {
1284*4882a593Smuzhiyun 	struct bfa_ioc_s *ioc = iocpf->ioc;
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 	bfa_trc(ioc, event);
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	switch (event) {
1289*4882a593Smuzhiyun 	case IOCPF_E_DISABLE:
1290*4882a593Smuzhiyun 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1291*4882a593Smuzhiyun 		break;
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun 	default:
1294*4882a593Smuzhiyun 		bfa_sm_fault(ioc, event);
1295*4882a593Smuzhiyun 	}
1296*4882a593Smuzhiyun }
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun /*
1299*4882a593Smuzhiyun  *  BFA IOC private functions
1300*4882a593Smuzhiyun  */
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun /*
1303*4882a593Smuzhiyun  * Notify common modules registered for notification.
1304*4882a593Smuzhiyun  */
1305*4882a593Smuzhiyun static void
bfa_ioc_event_notify(struct bfa_ioc_s * ioc,enum bfa_ioc_event_e event)1306*4882a593Smuzhiyun bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
1307*4882a593Smuzhiyun {
1308*4882a593Smuzhiyun 	struct bfa_ioc_notify_s	*notify;
1309*4882a593Smuzhiyun 	struct list_head	*qe;
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun 	list_for_each(qe, &ioc->notify_q) {
1312*4882a593Smuzhiyun 		notify = (struct bfa_ioc_notify_s *)qe;
1313*4882a593Smuzhiyun 		notify->cbfn(notify->cbarg, event);
1314*4882a593Smuzhiyun 	}
1315*4882a593Smuzhiyun }
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun static void
bfa_ioc_disable_comp(struct bfa_ioc_s * ioc)1318*4882a593Smuzhiyun bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
1319*4882a593Smuzhiyun {
1320*4882a593Smuzhiyun 	ioc->cbfn->disable_cbfn(ioc->bfa);
1321*4882a593Smuzhiyun 	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
1322*4882a593Smuzhiyun }
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun bfa_boolean_t
bfa_ioc_sem_get(void __iomem * sem_reg)1325*4882a593Smuzhiyun bfa_ioc_sem_get(void __iomem *sem_reg)
1326*4882a593Smuzhiyun {
1327*4882a593Smuzhiyun 	u32 r32;
1328*4882a593Smuzhiyun 	int cnt = 0;
1329*4882a593Smuzhiyun #define BFA_SEM_SPINCNT	3000
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun 	r32 = readl(sem_reg);
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun 	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1334*4882a593Smuzhiyun 		cnt++;
1335*4882a593Smuzhiyun 		udelay(2);
1336*4882a593Smuzhiyun 		r32 = readl(sem_reg);
1337*4882a593Smuzhiyun 	}
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun 	if (!(r32 & 1))
1340*4882a593Smuzhiyun 		return BFA_TRUE;
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun 	return BFA_FALSE;
1343*4882a593Smuzhiyun }
1344*4882a593Smuzhiyun 
1345*4882a593Smuzhiyun static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s * ioc)1346*4882a593Smuzhiyun bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1347*4882a593Smuzhiyun {
1348*4882a593Smuzhiyun 	u32	r32;
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 	/*
1351*4882a593Smuzhiyun 	 * First read to the semaphore register will return 0, subsequent reads
1352*4882a593Smuzhiyun 	 * will return 1. Semaphore is released by writing 1 to the register
1353*4882a593Smuzhiyun 	 */
1354*4882a593Smuzhiyun 	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1355*4882a593Smuzhiyun 	if (r32 == ~0) {
1356*4882a593Smuzhiyun 		WARN_ON(r32 == ~0);
1357*4882a593Smuzhiyun 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1358*4882a593Smuzhiyun 		return;
1359*4882a593Smuzhiyun 	}
1360*4882a593Smuzhiyun 	if (!(r32 & 1)) {
1361*4882a593Smuzhiyun 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1362*4882a593Smuzhiyun 		return;
1363*4882a593Smuzhiyun 	}
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 	bfa_sem_timer_start(ioc);
1366*4882a593Smuzhiyun }
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun /*
1369*4882a593Smuzhiyun  * Initialize LPU local memory (aka secondary memory / SRAM)
1370*4882a593Smuzhiyun  */
1371*4882a593Smuzhiyun static void
bfa_ioc_lmem_init(struct bfa_ioc_s * ioc)1372*4882a593Smuzhiyun bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1373*4882a593Smuzhiyun {
1374*4882a593Smuzhiyun 	u32	pss_ctl;
1375*4882a593Smuzhiyun 	int		i;
1376*4882a593Smuzhiyun #define PSS_LMEM_INIT_TIME  10000
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun 	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1379*4882a593Smuzhiyun 	pss_ctl &= ~__PSS_LMEM_RESET;
1380*4882a593Smuzhiyun 	pss_ctl |= __PSS_LMEM_INIT_EN;
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun 	/*
1383*4882a593Smuzhiyun 	 * i2c workaround 12.5khz clock
1384*4882a593Smuzhiyun 	 */
1385*4882a593Smuzhiyun 	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1386*4882a593Smuzhiyun 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 	/*
1389*4882a593Smuzhiyun 	 * wait for memory initialization to be complete
1390*4882a593Smuzhiyun 	 */
1391*4882a593Smuzhiyun 	i = 0;
1392*4882a593Smuzhiyun 	do {
1393*4882a593Smuzhiyun 		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1394*4882a593Smuzhiyun 		i++;
1395*4882a593Smuzhiyun 	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun 	/*
1398*4882a593Smuzhiyun 	 * If memory initialization is not successful, IOC timeout will catch
1399*4882a593Smuzhiyun 	 * such failures.
1400*4882a593Smuzhiyun 	 */
1401*4882a593Smuzhiyun 	WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1402*4882a593Smuzhiyun 	bfa_trc(ioc, pss_ctl);
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun 	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1405*4882a593Smuzhiyun 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1406*4882a593Smuzhiyun }
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun static void
bfa_ioc_lpu_start(struct bfa_ioc_s * ioc)1409*4882a593Smuzhiyun bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
1410*4882a593Smuzhiyun {
1411*4882a593Smuzhiyun 	u32	pss_ctl;
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun 	/*
1414*4882a593Smuzhiyun 	 * Take processor out of reset.
1415*4882a593Smuzhiyun 	 */
1416*4882a593Smuzhiyun 	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1417*4882a593Smuzhiyun 	pss_ctl &= ~__PSS_LPU0_RESET;
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1420*4882a593Smuzhiyun }
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun static void
bfa_ioc_lpu_stop(struct bfa_ioc_s * ioc)1423*4882a593Smuzhiyun bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
1424*4882a593Smuzhiyun {
1425*4882a593Smuzhiyun 	u32	pss_ctl;
1426*4882a593Smuzhiyun 
1427*4882a593Smuzhiyun 	/*
1428*4882a593Smuzhiyun 	 * Put processors in reset.
1429*4882a593Smuzhiyun 	 */
1430*4882a593Smuzhiyun 	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1431*4882a593Smuzhiyun 	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1434*4882a593Smuzhiyun }
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun /*
1437*4882a593Smuzhiyun  * Get driver and firmware versions.
1438*4882a593Smuzhiyun  */
1439*4882a593Smuzhiyun void
bfa_ioc_fwver_get(struct bfa_ioc_s * ioc,struct bfi_ioc_image_hdr_s * fwhdr)1440*4882a593Smuzhiyun bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1441*4882a593Smuzhiyun {
1442*4882a593Smuzhiyun 	u32	pgnum;
1443*4882a593Smuzhiyun 	u32	loff = 0;
1444*4882a593Smuzhiyun 	int		i;
1445*4882a593Smuzhiyun 	u32	*fwsig = (u32 *) fwhdr;
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1448*4882a593Smuzhiyun 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun 	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
1451*4882a593Smuzhiyun 	     i++) {
1452*4882a593Smuzhiyun 		fwsig[i] =
1453*4882a593Smuzhiyun 			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
1454*4882a593Smuzhiyun 		loff += sizeof(u32);
1455*4882a593Smuzhiyun 	}
1456*4882a593Smuzhiyun }
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun /*
1459*4882a593Smuzhiyun  * Returns TRUE if driver is willing to work with current smem f/w version.
1460*4882a593Smuzhiyun  */
1461*4882a593Smuzhiyun bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s * ioc,struct bfi_ioc_image_hdr_s * smem_fwhdr)1462*4882a593Smuzhiyun bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
1463*4882a593Smuzhiyun 		struct bfi_ioc_image_hdr_s *smem_fwhdr)
1464*4882a593Smuzhiyun {
1465*4882a593Smuzhiyun 	struct bfi_ioc_image_hdr_s *drv_fwhdr;
1466*4882a593Smuzhiyun 	enum bfi_ioc_img_ver_cmp_e smem_flash_cmp, drv_smem_cmp;
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
1469*4882a593Smuzhiyun 		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun 	/*
1472*4882a593Smuzhiyun 	 * If smem is incompatible or old, driver should not work with it.
1473*4882a593Smuzhiyun 	 */
1474*4882a593Smuzhiyun 	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, smem_fwhdr);
1475*4882a593Smuzhiyun 	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
1476*4882a593Smuzhiyun 		drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
1477*4882a593Smuzhiyun 		return BFA_FALSE;
1478*4882a593Smuzhiyun 	}
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	/*
1481*4882a593Smuzhiyun 	 * IF Flash has a better F/W than smem do not work with smem.
1482*4882a593Smuzhiyun 	 * If smem f/w == flash f/w, as smem f/w not old | incmp, work with it.
1483*4882a593Smuzhiyun 	 * If Flash is old or incomp work with smem iff smem f/w == drv f/w.
1484*4882a593Smuzhiyun 	 */
1485*4882a593Smuzhiyun 	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, smem_fwhdr);
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun 	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) {
1488*4882a593Smuzhiyun 		return BFA_FALSE;
1489*4882a593Smuzhiyun 	} else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) {
1490*4882a593Smuzhiyun 		return BFA_TRUE;
1491*4882a593Smuzhiyun 	} else {
1492*4882a593Smuzhiyun 		return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
1493*4882a593Smuzhiyun 			BFA_TRUE : BFA_FALSE;
1494*4882a593Smuzhiyun 	}
1495*4882a593Smuzhiyun }
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun /*
1498*4882a593Smuzhiyun  * Return true if current running version is valid. Firmware signature and
1499*4882a593Smuzhiyun  * execution context (driver/bios) must match.
1500*4882a593Smuzhiyun  */
1501*4882a593Smuzhiyun static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s * ioc,u32 boot_env)1502*4882a593Smuzhiyun bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
1503*4882a593Smuzhiyun {
1504*4882a593Smuzhiyun 	struct bfi_ioc_image_hdr_s fwhdr;
1505*4882a593Smuzhiyun 
1506*4882a593Smuzhiyun 	bfa_ioc_fwver_get(ioc, &fwhdr);
1507*4882a593Smuzhiyun 
1508*4882a593Smuzhiyun 	if (swab32(fwhdr.bootenv) != boot_env) {
1509*4882a593Smuzhiyun 		bfa_trc(ioc, fwhdr.bootenv);
1510*4882a593Smuzhiyun 		bfa_trc(ioc, boot_env);
1511*4882a593Smuzhiyun 		return BFA_FALSE;
1512*4882a593Smuzhiyun 	}
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
1515*4882a593Smuzhiyun }
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun static bfa_boolean_t
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s * fwhdr_1,struct bfi_ioc_image_hdr_s * fwhdr_2)1518*4882a593Smuzhiyun bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1,
1519*4882a593Smuzhiyun 				struct bfi_ioc_image_hdr_s *fwhdr_2)
1520*4882a593Smuzhiyun {
1521*4882a593Smuzhiyun 	int i;
1522*4882a593Smuzhiyun 
1523*4882a593Smuzhiyun 	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++)
1524*4882a593Smuzhiyun 		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
1525*4882a593Smuzhiyun 			return BFA_FALSE;
1526*4882a593Smuzhiyun 
1527*4882a593Smuzhiyun 	return BFA_TRUE;
1528*4882a593Smuzhiyun }
1529*4882a593Smuzhiyun 
1530*4882a593Smuzhiyun /*
1531*4882a593Smuzhiyun  * Returns TRUE if major minor and maintainence are same.
1532*4882a593Smuzhiyun  * If patch versions are same, check for MD5 Checksum to be same.
1533*4882a593Smuzhiyun  */
1534*4882a593Smuzhiyun static bfa_boolean_t
bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s * drv_fwhdr,struct bfi_ioc_image_hdr_s * fwhdr_to_cmp)1535*4882a593Smuzhiyun bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s *drv_fwhdr,
1536*4882a593Smuzhiyun 				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
1537*4882a593Smuzhiyun {
1538*4882a593Smuzhiyun 	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
1539*4882a593Smuzhiyun 		return BFA_FALSE;
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
1542*4882a593Smuzhiyun 		return BFA_FALSE;
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun 	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
1545*4882a593Smuzhiyun 		return BFA_FALSE;
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun 	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
1548*4882a593Smuzhiyun 		return BFA_FALSE;
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun 	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
1551*4882a593Smuzhiyun 		drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
1552*4882a593Smuzhiyun 		drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) {
1553*4882a593Smuzhiyun 		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
1554*4882a593Smuzhiyun 	}
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 	return BFA_TRUE;
1557*4882a593Smuzhiyun }
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun static bfa_boolean_t
bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s * flash_fwhdr)1560*4882a593Smuzhiyun bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s *flash_fwhdr)
1561*4882a593Smuzhiyun {
1562*4882a593Smuzhiyun 	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
1563*4882a593Smuzhiyun 		return BFA_FALSE;
1564*4882a593Smuzhiyun 
1565*4882a593Smuzhiyun 	return BFA_TRUE;
1566*4882a593Smuzhiyun }
1567*4882a593Smuzhiyun 
fwhdr_is_ga(struct bfi_ioc_image_hdr_s * fwhdr)1568*4882a593Smuzhiyun static bfa_boolean_t fwhdr_is_ga(struct bfi_ioc_image_hdr_s *fwhdr)
1569*4882a593Smuzhiyun {
1570*4882a593Smuzhiyun 	if (fwhdr->fwver.phase == 0 &&
1571*4882a593Smuzhiyun 		fwhdr->fwver.build == 0)
1572*4882a593Smuzhiyun 		return BFA_TRUE;
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 	return BFA_FALSE;
1575*4882a593Smuzhiyun }
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun /*
1578*4882a593Smuzhiyun  * Returns TRUE if both are compatible and patch of fwhdr_to_cmp is better.
1579*4882a593Smuzhiyun  */
1580*4882a593Smuzhiyun static enum bfi_ioc_img_ver_cmp_e
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s * base_fwhdr,struct bfi_ioc_image_hdr_s * fwhdr_to_cmp)1581*4882a593Smuzhiyun bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s *base_fwhdr,
1582*4882a593Smuzhiyun 				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
1583*4882a593Smuzhiyun {
1584*4882a593Smuzhiyun 	if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == BFA_FALSE)
1585*4882a593Smuzhiyun 		return BFI_IOC_IMG_VER_INCOMP;
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun 	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
1588*4882a593Smuzhiyun 		return BFI_IOC_IMG_VER_BETTER;
1589*4882a593Smuzhiyun 
1590*4882a593Smuzhiyun 	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
1591*4882a593Smuzhiyun 		return BFI_IOC_IMG_VER_OLD;
1592*4882a593Smuzhiyun 
1593*4882a593Smuzhiyun 	/*
1594*4882a593Smuzhiyun 	 * GA takes priority over internal builds of the same patch stream.
1595*4882a593Smuzhiyun 	 * At this point major minor maint and patch numbers are same.
1596*4882a593Smuzhiyun 	 */
1597*4882a593Smuzhiyun 
1598*4882a593Smuzhiyun 	if (fwhdr_is_ga(base_fwhdr) == BFA_TRUE) {
1599*4882a593Smuzhiyun 		if (fwhdr_is_ga(fwhdr_to_cmp))
1600*4882a593Smuzhiyun 			return BFI_IOC_IMG_VER_SAME;
1601*4882a593Smuzhiyun 		else
1602*4882a593Smuzhiyun 			return BFI_IOC_IMG_VER_OLD;
1603*4882a593Smuzhiyun 	} else {
1604*4882a593Smuzhiyun 		if (fwhdr_is_ga(fwhdr_to_cmp))
1605*4882a593Smuzhiyun 			return BFI_IOC_IMG_VER_BETTER;
1606*4882a593Smuzhiyun 	}
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun 	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
1609*4882a593Smuzhiyun 		return BFI_IOC_IMG_VER_BETTER;
1610*4882a593Smuzhiyun 	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
1611*4882a593Smuzhiyun 		return BFI_IOC_IMG_VER_OLD;
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
1614*4882a593Smuzhiyun 		return BFI_IOC_IMG_VER_BETTER;
1615*4882a593Smuzhiyun 	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
1616*4882a593Smuzhiyun 		return BFI_IOC_IMG_VER_OLD;
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun 	/*
1619*4882a593Smuzhiyun 	 * All Version Numbers are equal.
1620*4882a593Smuzhiyun 	 * Md5 check to be done as a part of compatibility check.
1621*4882a593Smuzhiyun 	 */
1622*4882a593Smuzhiyun 	return BFI_IOC_IMG_VER_SAME;
1623*4882a593Smuzhiyun }
1624*4882a593Smuzhiyun 
1625*4882a593Smuzhiyun #define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun bfa_status_t
bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s * ioc,u32 off,u32 * fwimg)1628*4882a593Smuzhiyun bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
1629*4882a593Smuzhiyun 				u32 *fwimg)
1630*4882a593Smuzhiyun {
1631*4882a593Smuzhiyun 	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
1632*4882a593Smuzhiyun 			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
1633*4882a593Smuzhiyun 			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
1634*4882a593Smuzhiyun }
1635*4882a593Smuzhiyun 
1636*4882a593Smuzhiyun static enum bfi_ioc_img_ver_cmp_e
bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s * ioc,struct bfi_ioc_image_hdr_s * base_fwhdr)1637*4882a593Smuzhiyun bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s *ioc,
1638*4882a593Smuzhiyun 			struct bfi_ioc_image_hdr_s *base_fwhdr)
1639*4882a593Smuzhiyun {
1640*4882a593Smuzhiyun 	struct bfi_ioc_image_hdr_s *flash_fwhdr;
1641*4882a593Smuzhiyun 	bfa_status_t status;
1642*4882a593Smuzhiyun 	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];
1643*4882a593Smuzhiyun 
1644*4882a593Smuzhiyun 	status = bfa_ioc_flash_img_get_chnk(ioc, 0, fwimg);
1645*4882a593Smuzhiyun 	if (status != BFA_STATUS_OK)
1646*4882a593Smuzhiyun 		return BFI_IOC_IMG_VER_INCOMP;
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun 	flash_fwhdr = (struct bfi_ioc_image_hdr_s *) fwimg;
1649*4882a593Smuzhiyun 	if (bfa_ioc_flash_fwver_valid(flash_fwhdr) == BFA_TRUE)
1650*4882a593Smuzhiyun 		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
1651*4882a593Smuzhiyun 	else
1652*4882a593Smuzhiyun 		return BFI_IOC_IMG_VER_INCOMP;
1653*4882a593Smuzhiyun }
1654*4882a593Smuzhiyun 
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun /*
1657*4882a593Smuzhiyun  * Invalidate fwver signature
1658*4882a593Smuzhiyun  */
1659*4882a593Smuzhiyun bfa_status_t
bfa_ioc_fwsig_invalidate(struct bfa_ioc_s * ioc)1660*4882a593Smuzhiyun bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc)
1661*4882a593Smuzhiyun {
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun 	u32	pgnum;
1664*4882a593Smuzhiyun 	u32	loff = 0;
1665*4882a593Smuzhiyun 	enum bfi_ioc_state ioc_fwstate;
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun 	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
1668*4882a593Smuzhiyun 	if (!bfa_ioc_state_disabled(ioc_fwstate))
1669*4882a593Smuzhiyun 		return BFA_STATUS_ADAPTER_ENABLED;
1670*4882a593Smuzhiyun 
1671*4882a593Smuzhiyun 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1672*4882a593Smuzhiyun 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1673*4882a593Smuzhiyun 	bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN);
1674*4882a593Smuzhiyun 
1675*4882a593Smuzhiyun 	return BFA_STATUS_OK;
1676*4882a593Smuzhiyun }
1677*4882a593Smuzhiyun 
1678*4882a593Smuzhiyun /*
1679*4882a593Smuzhiyun  * Conditionally flush any pending message from firmware at start.
1680*4882a593Smuzhiyun  */
1681*4882a593Smuzhiyun static void
bfa_ioc_msgflush(struct bfa_ioc_s * ioc)1682*4882a593Smuzhiyun bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1683*4882a593Smuzhiyun {
1684*4882a593Smuzhiyun 	u32	r32;
1685*4882a593Smuzhiyun 
1686*4882a593Smuzhiyun 	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1687*4882a593Smuzhiyun 	if (r32)
1688*4882a593Smuzhiyun 		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1689*4882a593Smuzhiyun }
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun static void
bfa_ioc_hwinit(struct bfa_ioc_s * ioc,bfa_boolean_t force)1692*4882a593Smuzhiyun bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1693*4882a593Smuzhiyun {
1694*4882a593Smuzhiyun 	enum bfi_ioc_state ioc_fwstate;
1695*4882a593Smuzhiyun 	bfa_boolean_t fwvalid;
1696*4882a593Smuzhiyun 	u32 boot_type;
1697*4882a593Smuzhiyun 	u32 boot_env;
1698*4882a593Smuzhiyun 
1699*4882a593Smuzhiyun 	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
1700*4882a593Smuzhiyun 
1701*4882a593Smuzhiyun 	if (force)
1702*4882a593Smuzhiyun 		ioc_fwstate = BFI_IOC_UNINIT;
1703*4882a593Smuzhiyun 
1704*4882a593Smuzhiyun 	bfa_trc(ioc, ioc_fwstate);
1705*4882a593Smuzhiyun 
1706*4882a593Smuzhiyun 	boot_type = BFI_FWBOOT_TYPE_NORMAL;
1707*4882a593Smuzhiyun 	boot_env = BFI_FWBOOT_ENV_OS;
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 	/*
1710*4882a593Smuzhiyun 	 * check if firmware is valid
1711*4882a593Smuzhiyun 	 */
1712*4882a593Smuzhiyun 	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1713*4882a593Smuzhiyun 		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun 	if (!fwvalid) {
1716*4882a593Smuzhiyun 		if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
1717*4882a593Smuzhiyun 			bfa_ioc_poll_fwinit(ioc);
1718*4882a593Smuzhiyun 		return;
1719*4882a593Smuzhiyun 	}
1720*4882a593Smuzhiyun 
1721*4882a593Smuzhiyun 	/*
1722*4882a593Smuzhiyun 	 * If hardware initialization is in progress (initialized by other IOC),
1723*4882a593Smuzhiyun 	 * just wait for an initialization completion interrupt.
1724*4882a593Smuzhiyun 	 */
1725*4882a593Smuzhiyun 	if (ioc_fwstate == BFI_IOC_INITING) {
1726*4882a593Smuzhiyun 		bfa_ioc_poll_fwinit(ioc);
1727*4882a593Smuzhiyun 		return;
1728*4882a593Smuzhiyun 	}
1729*4882a593Smuzhiyun 
1730*4882a593Smuzhiyun 	/*
1731*4882a593Smuzhiyun 	 * If IOC function is disabled and firmware version is same,
1732*4882a593Smuzhiyun 	 * just re-enable IOC.
1733*4882a593Smuzhiyun 	 *
1734*4882a593Smuzhiyun 	 * If option rom, IOC must not be in operational state. With
1735*4882a593Smuzhiyun 	 * convergence, IOC will be in operational state when 2nd driver
1736*4882a593Smuzhiyun 	 * is loaded.
1737*4882a593Smuzhiyun 	 */
1738*4882a593Smuzhiyun 	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 		/*
1741*4882a593Smuzhiyun 		 * When using MSI-X any pending firmware ready event should
1742*4882a593Smuzhiyun 		 * be flushed. Otherwise MSI-X interrupts are not delivered.
1743*4882a593Smuzhiyun 		 */
1744*4882a593Smuzhiyun 		bfa_ioc_msgflush(ioc);
1745*4882a593Smuzhiyun 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1746*4882a593Smuzhiyun 		return;
1747*4882a593Smuzhiyun 	}
1748*4882a593Smuzhiyun 
1749*4882a593Smuzhiyun 	/*
1750*4882a593Smuzhiyun 	 * Initialize the h/w for any other states.
1751*4882a593Smuzhiyun 	 */
1752*4882a593Smuzhiyun 	if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
1753*4882a593Smuzhiyun 		bfa_ioc_poll_fwinit(ioc);
1754*4882a593Smuzhiyun }
1755*4882a593Smuzhiyun 
1756*4882a593Smuzhiyun static void
bfa_ioc_timeout(void * ioc_arg)1757*4882a593Smuzhiyun bfa_ioc_timeout(void *ioc_arg)
1758*4882a593Smuzhiyun {
1759*4882a593Smuzhiyun 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
1760*4882a593Smuzhiyun 
1761*4882a593Smuzhiyun 	bfa_trc(ioc, 0);
1762*4882a593Smuzhiyun 	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1763*4882a593Smuzhiyun }
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun void
bfa_ioc_mbox_send(struct bfa_ioc_s * ioc,void * ioc_msg,int len)1766*4882a593Smuzhiyun bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1767*4882a593Smuzhiyun {
1768*4882a593Smuzhiyun 	u32 *msgp = (u32 *) ioc_msg;
1769*4882a593Smuzhiyun 	u32 i;
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun 	bfa_trc(ioc, msgp[0]);
1772*4882a593Smuzhiyun 	bfa_trc(ioc, len);
1773*4882a593Smuzhiyun 
1774*4882a593Smuzhiyun 	WARN_ON(len > BFI_IOC_MSGLEN_MAX);
1775*4882a593Smuzhiyun 
1776*4882a593Smuzhiyun 	/*
1777*4882a593Smuzhiyun 	 * first write msg to mailbox registers
1778*4882a593Smuzhiyun 	 */
1779*4882a593Smuzhiyun 	for (i = 0; i < len / sizeof(u32); i++)
1780*4882a593Smuzhiyun 		writel(cpu_to_le32(msgp[i]),
1781*4882a593Smuzhiyun 			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1782*4882a593Smuzhiyun 
1783*4882a593Smuzhiyun 	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1784*4882a593Smuzhiyun 		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun 	/*
1787*4882a593Smuzhiyun 	 * write 1 to mailbox CMD to trigger LPU event
1788*4882a593Smuzhiyun 	 */
1789*4882a593Smuzhiyun 	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1790*4882a593Smuzhiyun 	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1791*4882a593Smuzhiyun }
1792*4882a593Smuzhiyun 
1793*4882a593Smuzhiyun static void
bfa_ioc_send_enable(struct bfa_ioc_s * ioc)1794*4882a593Smuzhiyun bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1795*4882a593Smuzhiyun {
1796*4882a593Smuzhiyun 	struct bfi_ioc_ctrl_req_s enable_req;
1797*4882a593Smuzhiyun 
1798*4882a593Smuzhiyun 	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1799*4882a593Smuzhiyun 		    bfa_ioc_portid(ioc));
1800*4882a593Smuzhiyun 	enable_req.clscode = cpu_to_be16(ioc->clscode);
1801*4882a593Smuzhiyun 	/* unsigned 32-bit time_t overflow in y2106 */
1802*4882a593Smuzhiyun 	enable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
1803*4882a593Smuzhiyun 	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1804*4882a593Smuzhiyun }
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun static void
bfa_ioc_send_disable(struct bfa_ioc_s * ioc)1807*4882a593Smuzhiyun bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1808*4882a593Smuzhiyun {
1809*4882a593Smuzhiyun 	struct bfi_ioc_ctrl_req_s disable_req;
1810*4882a593Smuzhiyun 
1811*4882a593Smuzhiyun 	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1812*4882a593Smuzhiyun 		    bfa_ioc_portid(ioc));
1813*4882a593Smuzhiyun 	disable_req.clscode = cpu_to_be16(ioc->clscode);
1814*4882a593Smuzhiyun 	/* unsigned 32-bit time_t overflow in y2106 */
1815*4882a593Smuzhiyun 	disable_req.tv_sec = be32_to_cpu(ktime_get_real_seconds());
1816*4882a593Smuzhiyun 	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1817*4882a593Smuzhiyun }
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun static void
bfa_ioc_send_getattr(struct bfa_ioc_s * ioc)1820*4882a593Smuzhiyun bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1821*4882a593Smuzhiyun {
1822*4882a593Smuzhiyun 	struct bfi_ioc_getattr_req_s	attr_req;
1823*4882a593Smuzhiyun 
1824*4882a593Smuzhiyun 	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1825*4882a593Smuzhiyun 		    bfa_ioc_portid(ioc));
1826*4882a593Smuzhiyun 	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1827*4882a593Smuzhiyun 	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1828*4882a593Smuzhiyun }
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun static void
bfa_ioc_hb_check(void * cbarg)1831*4882a593Smuzhiyun bfa_ioc_hb_check(void *cbarg)
1832*4882a593Smuzhiyun {
1833*4882a593Smuzhiyun 	struct bfa_ioc_s  *ioc = cbarg;
1834*4882a593Smuzhiyun 	u32	hb_count;
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun 	hb_count = readl(ioc->ioc_regs.heartbeat);
1837*4882a593Smuzhiyun 	if (ioc->hb_count == hb_count) {
1838*4882a593Smuzhiyun 		bfa_ioc_recover(ioc);
1839*4882a593Smuzhiyun 		return;
1840*4882a593Smuzhiyun 	} else {
1841*4882a593Smuzhiyun 		ioc->hb_count = hb_count;
1842*4882a593Smuzhiyun 	}
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 	bfa_ioc_mbox_poll(ioc);
1845*4882a593Smuzhiyun 	bfa_hb_timer_start(ioc);
1846*4882a593Smuzhiyun }
1847*4882a593Smuzhiyun 
1848*4882a593Smuzhiyun static void
bfa_ioc_hb_monitor(struct bfa_ioc_s * ioc)1849*4882a593Smuzhiyun bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1850*4882a593Smuzhiyun {
1851*4882a593Smuzhiyun 	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1852*4882a593Smuzhiyun 	bfa_hb_timer_start(ioc);
1853*4882a593Smuzhiyun }
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun /*
1856*4882a593Smuzhiyun  *	Initiate a full firmware download.
1857*4882a593Smuzhiyun  */
1858*4882a593Smuzhiyun static bfa_status_t
bfa_ioc_download_fw(struct bfa_ioc_s * ioc,u32 boot_type,u32 boot_env)1859*4882a593Smuzhiyun bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
1860*4882a593Smuzhiyun 		    u32 boot_env)
1861*4882a593Smuzhiyun {
1862*4882a593Smuzhiyun 	u32 *fwimg;
1863*4882a593Smuzhiyun 	u32 pgnum;
1864*4882a593Smuzhiyun 	u32 loff = 0;
1865*4882a593Smuzhiyun 	u32 chunkno = 0;
1866*4882a593Smuzhiyun 	u32 i;
1867*4882a593Smuzhiyun 	u32 asicmode;
1868*4882a593Smuzhiyun 	u32 fwimg_size;
1869*4882a593Smuzhiyun 	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
1870*4882a593Smuzhiyun 	bfa_status_t status;
1871*4882a593Smuzhiyun 
1872*4882a593Smuzhiyun 	if (boot_env == BFI_FWBOOT_ENV_OS &&
1873*4882a593Smuzhiyun 		boot_type == BFI_FWBOOT_TYPE_FLASH) {
1874*4882a593Smuzhiyun 		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
1875*4882a593Smuzhiyun 
1876*4882a593Smuzhiyun 		status = bfa_ioc_flash_img_get_chnk(ioc,
1877*4882a593Smuzhiyun 			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
1878*4882a593Smuzhiyun 		if (status != BFA_STATUS_OK)
1879*4882a593Smuzhiyun 			return status;
1880*4882a593Smuzhiyun 
1881*4882a593Smuzhiyun 		fwimg = fwimg_buf;
1882*4882a593Smuzhiyun 	} else {
1883*4882a593Smuzhiyun 		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
1884*4882a593Smuzhiyun 		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
1885*4882a593Smuzhiyun 					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1886*4882a593Smuzhiyun 	}
1887*4882a593Smuzhiyun 
1888*4882a593Smuzhiyun 	bfa_trc(ioc, fwimg_size);
1889*4882a593Smuzhiyun 
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1892*4882a593Smuzhiyun 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun 	for (i = 0; i < fwimg_size; i++) {
1895*4882a593Smuzhiyun 
1896*4882a593Smuzhiyun 		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1897*4882a593Smuzhiyun 			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1898*4882a593Smuzhiyun 
1899*4882a593Smuzhiyun 			if (boot_env == BFI_FWBOOT_ENV_OS &&
1900*4882a593Smuzhiyun 				boot_type == BFI_FWBOOT_TYPE_FLASH) {
1901*4882a593Smuzhiyun 				status = bfa_ioc_flash_img_get_chnk(ioc,
1902*4882a593Smuzhiyun 					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
1903*4882a593Smuzhiyun 					fwimg_buf);
1904*4882a593Smuzhiyun 				if (status != BFA_STATUS_OK)
1905*4882a593Smuzhiyun 					return status;
1906*4882a593Smuzhiyun 
1907*4882a593Smuzhiyun 				fwimg = fwimg_buf;
1908*4882a593Smuzhiyun 			} else {
1909*4882a593Smuzhiyun 				fwimg = bfa_cb_image_get_chunk(
1910*4882a593Smuzhiyun 					bfa_ioc_asic_gen(ioc),
1911*4882a593Smuzhiyun 					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1912*4882a593Smuzhiyun 			}
1913*4882a593Smuzhiyun 		}
1914*4882a593Smuzhiyun 
1915*4882a593Smuzhiyun 		/*
1916*4882a593Smuzhiyun 		 * write smem
1917*4882a593Smuzhiyun 		 */
1918*4882a593Smuzhiyun 		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
1919*4882a593Smuzhiyun 			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun 		loff += sizeof(u32);
1922*4882a593Smuzhiyun 
1923*4882a593Smuzhiyun 		/*
1924*4882a593Smuzhiyun 		 * handle page offset wrap around
1925*4882a593Smuzhiyun 		 */
1926*4882a593Smuzhiyun 		loff = PSS_SMEM_PGOFF(loff);
1927*4882a593Smuzhiyun 		if (loff == 0) {
1928*4882a593Smuzhiyun 			pgnum++;
1929*4882a593Smuzhiyun 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1930*4882a593Smuzhiyun 		}
1931*4882a593Smuzhiyun 	}
1932*4882a593Smuzhiyun 
1933*4882a593Smuzhiyun 	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1934*4882a593Smuzhiyun 			ioc->ioc_regs.host_page_num_fn);
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun 	/*
1937*4882a593Smuzhiyun 	 * Set boot type, env and device mode at the end.
1938*4882a593Smuzhiyun 	 */
1939*4882a593Smuzhiyun 	if (boot_env == BFI_FWBOOT_ENV_OS &&
1940*4882a593Smuzhiyun 		boot_type == BFI_FWBOOT_TYPE_FLASH) {
1941*4882a593Smuzhiyun 		boot_type = BFI_FWBOOT_TYPE_NORMAL;
1942*4882a593Smuzhiyun 	}
1943*4882a593Smuzhiyun 	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1944*4882a593Smuzhiyun 				ioc->port0_mode, ioc->port1_mode);
1945*4882a593Smuzhiyun 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
1946*4882a593Smuzhiyun 			swab32(asicmode));
1947*4882a593Smuzhiyun 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
1948*4882a593Smuzhiyun 			swab32(boot_type));
1949*4882a593Smuzhiyun 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
1950*4882a593Smuzhiyun 			swab32(boot_env));
1951*4882a593Smuzhiyun 	return BFA_STATUS_OK;
1952*4882a593Smuzhiyun }
1953*4882a593Smuzhiyun 
1954*4882a593Smuzhiyun 
1955*4882a593Smuzhiyun /*
1956*4882a593Smuzhiyun  * Update BFA configuration from firmware configuration.
1957*4882a593Smuzhiyun  */
1958*4882a593Smuzhiyun static void
bfa_ioc_getattr_reply(struct bfa_ioc_s * ioc)1959*4882a593Smuzhiyun bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1960*4882a593Smuzhiyun {
1961*4882a593Smuzhiyun 	struct bfi_ioc_attr_s	*attr = ioc->attr;
1962*4882a593Smuzhiyun 
1963*4882a593Smuzhiyun 	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
1964*4882a593Smuzhiyun 	attr->card_type     = be32_to_cpu(attr->card_type);
1965*4882a593Smuzhiyun 	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
1966*4882a593Smuzhiyun 	ioc->fcmode	= (attr->port_mode == BFI_PORT_MODE_FC);
1967*4882a593Smuzhiyun 	attr->mfg_year	= be16_to_cpu(attr->mfg_year);
1968*4882a593Smuzhiyun 
1969*4882a593Smuzhiyun 	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1970*4882a593Smuzhiyun }
1971*4882a593Smuzhiyun 
1972*4882a593Smuzhiyun /*
1973*4882a593Smuzhiyun  * Attach time initialization of mbox logic.
1974*4882a593Smuzhiyun  */
1975*4882a593Smuzhiyun static void
bfa_ioc_mbox_attach(struct bfa_ioc_s * ioc)1976*4882a593Smuzhiyun bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1977*4882a593Smuzhiyun {
1978*4882a593Smuzhiyun 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1979*4882a593Smuzhiyun 	int	mc;
1980*4882a593Smuzhiyun 
1981*4882a593Smuzhiyun 	INIT_LIST_HEAD(&mod->cmd_q);
1982*4882a593Smuzhiyun 	for (mc = 0; mc < BFI_MC_MAX; mc++) {
1983*4882a593Smuzhiyun 		mod->mbhdlr[mc].cbfn = NULL;
1984*4882a593Smuzhiyun 		mod->mbhdlr[mc].cbarg = ioc->bfa;
1985*4882a593Smuzhiyun 	}
1986*4882a593Smuzhiyun }
1987*4882a593Smuzhiyun 
1988*4882a593Smuzhiyun /*
1989*4882a593Smuzhiyun  * Mbox poll timer -- restarts any pending mailbox requests.
1990*4882a593Smuzhiyun  */
1991*4882a593Smuzhiyun static void
bfa_ioc_mbox_poll(struct bfa_ioc_s * ioc)1992*4882a593Smuzhiyun bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1993*4882a593Smuzhiyun {
1994*4882a593Smuzhiyun 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
1995*4882a593Smuzhiyun 	struct bfa_mbox_cmd_s		*cmd;
1996*4882a593Smuzhiyun 	u32			stat;
1997*4882a593Smuzhiyun 
1998*4882a593Smuzhiyun 	/*
1999*4882a593Smuzhiyun 	 * If no command pending, do nothing
2000*4882a593Smuzhiyun 	 */
2001*4882a593Smuzhiyun 	if (list_empty(&mod->cmd_q))
2002*4882a593Smuzhiyun 		return;
2003*4882a593Smuzhiyun 
2004*4882a593Smuzhiyun 	/*
2005*4882a593Smuzhiyun 	 * If previous command is not yet fetched by firmware, do nothing
2006*4882a593Smuzhiyun 	 */
2007*4882a593Smuzhiyun 	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2008*4882a593Smuzhiyun 	if (stat)
2009*4882a593Smuzhiyun 		return;
2010*4882a593Smuzhiyun 
2011*4882a593Smuzhiyun 	/*
2012*4882a593Smuzhiyun 	 * Enqueue command to firmware.
2013*4882a593Smuzhiyun 	 */
2014*4882a593Smuzhiyun 	bfa_q_deq(&mod->cmd_q, &cmd);
2015*4882a593Smuzhiyun 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2016*4882a593Smuzhiyun }
2017*4882a593Smuzhiyun 
2018*4882a593Smuzhiyun /*
2019*4882a593Smuzhiyun  * Cleanup any pending requests.
2020*4882a593Smuzhiyun  */
2021*4882a593Smuzhiyun static void
bfa_ioc_mbox_flush(struct bfa_ioc_s * ioc)2022*4882a593Smuzhiyun bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
2023*4882a593Smuzhiyun {
2024*4882a593Smuzhiyun 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2025*4882a593Smuzhiyun 	struct bfa_mbox_cmd_s		*cmd;
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun 	while (!list_empty(&mod->cmd_q))
2028*4882a593Smuzhiyun 		bfa_q_deq(&mod->cmd_q, &cmd);
2029*4882a593Smuzhiyun }
2030*4882a593Smuzhiyun 
2031*4882a593Smuzhiyun /*
2032*4882a593Smuzhiyun  * Read data from SMEM to host through PCI memmap
2033*4882a593Smuzhiyun  *
2034*4882a593Smuzhiyun  * @param[in]	ioc	memory for IOC
2035*4882a593Smuzhiyun  * @param[in]	tbuf	app memory to store data from smem
2036*4882a593Smuzhiyun  * @param[in]	soff	smem offset
2037*4882a593Smuzhiyun  * @param[in]	sz	size of smem in bytes
2038*4882a593Smuzhiyun  */
2039*4882a593Smuzhiyun static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s * ioc,void * tbuf,u32 soff,u32 sz)2040*4882a593Smuzhiyun bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
2041*4882a593Smuzhiyun {
2042*4882a593Smuzhiyun 	u32 pgnum, loff;
2043*4882a593Smuzhiyun 	__be32 r32;
2044*4882a593Smuzhiyun 	int i, len;
2045*4882a593Smuzhiyun 	u32 *buf = tbuf;
2046*4882a593Smuzhiyun 
2047*4882a593Smuzhiyun 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2048*4882a593Smuzhiyun 	loff = PSS_SMEM_PGOFF(soff);
2049*4882a593Smuzhiyun 	bfa_trc(ioc, pgnum);
2050*4882a593Smuzhiyun 	bfa_trc(ioc, loff);
2051*4882a593Smuzhiyun 	bfa_trc(ioc, sz);
2052*4882a593Smuzhiyun 
2053*4882a593Smuzhiyun 	/*
2054*4882a593Smuzhiyun 	 *  Hold semaphore to serialize pll init and fwtrc.
2055*4882a593Smuzhiyun 	 */
2056*4882a593Smuzhiyun 	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2057*4882a593Smuzhiyun 		bfa_trc(ioc, 0);
2058*4882a593Smuzhiyun 		return BFA_STATUS_FAILED;
2059*4882a593Smuzhiyun 	}
2060*4882a593Smuzhiyun 
2061*4882a593Smuzhiyun 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2062*4882a593Smuzhiyun 
2063*4882a593Smuzhiyun 	len = sz/sizeof(u32);
2064*4882a593Smuzhiyun 	bfa_trc(ioc, len);
2065*4882a593Smuzhiyun 	for (i = 0; i < len; i++) {
2066*4882a593Smuzhiyun 		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
2067*4882a593Smuzhiyun 		buf[i] = swab32(r32);
2068*4882a593Smuzhiyun 		loff += sizeof(u32);
2069*4882a593Smuzhiyun 
2070*4882a593Smuzhiyun 		/*
2071*4882a593Smuzhiyun 		 * handle page offset wrap around
2072*4882a593Smuzhiyun 		 */
2073*4882a593Smuzhiyun 		loff = PSS_SMEM_PGOFF(loff);
2074*4882a593Smuzhiyun 		if (loff == 0) {
2075*4882a593Smuzhiyun 			pgnum++;
2076*4882a593Smuzhiyun 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2077*4882a593Smuzhiyun 		}
2078*4882a593Smuzhiyun 	}
2079*4882a593Smuzhiyun 	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2080*4882a593Smuzhiyun 			ioc->ioc_regs.host_page_num_fn);
2081*4882a593Smuzhiyun 	/*
2082*4882a593Smuzhiyun 	 *  release semaphore.
2083*4882a593Smuzhiyun 	 */
2084*4882a593Smuzhiyun 	readl(ioc->ioc_regs.ioc_init_sem_reg);
2085*4882a593Smuzhiyun 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2086*4882a593Smuzhiyun 
2087*4882a593Smuzhiyun 	bfa_trc(ioc, pgnum);
2088*4882a593Smuzhiyun 	return BFA_STATUS_OK;
2089*4882a593Smuzhiyun }
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun /*
2092*4882a593Smuzhiyun  * Clear SMEM data from host through PCI memmap
2093*4882a593Smuzhiyun  *
2094*4882a593Smuzhiyun  * @param[in]	ioc	memory for IOC
2095*4882a593Smuzhiyun  * @param[in]	soff	smem offset
2096*4882a593Smuzhiyun  * @param[in]	sz	size of smem in bytes
2097*4882a593Smuzhiyun  */
2098*4882a593Smuzhiyun static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s * ioc,u32 soff,u32 sz)2099*4882a593Smuzhiyun bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
2100*4882a593Smuzhiyun {
2101*4882a593Smuzhiyun 	int i, len;
2102*4882a593Smuzhiyun 	u32 pgnum, loff;
2103*4882a593Smuzhiyun 
2104*4882a593Smuzhiyun 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2105*4882a593Smuzhiyun 	loff = PSS_SMEM_PGOFF(soff);
2106*4882a593Smuzhiyun 	bfa_trc(ioc, pgnum);
2107*4882a593Smuzhiyun 	bfa_trc(ioc, loff);
2108*4882a593Smuzhiyun 	bfa_trc(ioc, sz);
2109*4882a593Smuzhiyun 
2110*4882a593Smuzhiyun 	/*
2111*4882a593Smuzhiyun 	 *  Hold semaphore to serialize pll init and fwtrc.
2112*4882a593Smuzhiyun 	 */
2113*4882a593Smuzhiyun 	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
2114*4882a593Smuzhiyun 		bfa_trc(ioc, 0);
2115*4882a593Smuzhiyun 		return BFA_STATUS_FAILED;
2116*4882a593Smuzhiyun 	}
2117*4882a593Smuzhiyun 
2118*4882a593Smuzhiyun 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2119*4882a593Smuzhiyun 
2120*4882a593Smuzhiyun 	len = sz/sizeof(u32); /* len in words */
2121*4882a593Smuzhiyun 	bfa_trc(ioc, len);
2122*4882a593Smuzhiyun 	for (i = 0; i < len; i++) {
2123*4882a593Smuzhiyun 		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
2124*4882a593Smuzhiyun 		loff += sizeof(u32);
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun 		/*
2127*4882a593Smuzhiyun 		 * handle page offset wrap around
2128*4882a593Smuzhiyun 		 */
2129*4882a593Smuzhiyun 		loff = PSS_SMEM_PGOFF(loff);
2130*4882a593Smuzhiyun 		if (loff == 0) {
2131*4882a593Smuzhiyun 			pgnum++;
2132*4882a593Smuzhiyun 			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2133*4882a593Smuzhiyun 		}
2134*4882a593Smuzhiyun 	}
2135*4882a593Smuzhiyun 	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2136*4882a593Smuzhiyun 			ioc->ioc_regs.host_page_num_fn);
2137*4882a593Smuzhiyun 
2138*4882a593Smuzhiyun 	/*
2139*4882a593Smuzhiyun 	 *  release semaphore.
2140*4882a593Smuzhiyun 	 */
2141*4882a593Smuzhiyun 	readl(ioc->ioc_regs.ioc_init_sem_reg);
2142*4882a593Smuzhiyun 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2143*4882a593Smuzhiyun 	bfa_trc(ioc, pgnum);
2144*4882a593Smuzhiyun 	return BFA_STATUS_OK;
2145*4882a593Smuzhiyun }
2146*4882a593Smuzhiyun 
2147*4882a593Smuzhiyun static void
bfa_ioc_fail_notify(struct bfa_ioc_s * ioc)2148*4882a593Smuzhiyun bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
2149*4882a593Smuzhiyun {
2150*4882a593Smuzhiyun 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2151*4882a593Smuzhiyun 
2152*4882a593Smuzhiyun 	/*
2153*4882a593Smuzhiyun 	 * Notify driver and common modules registered for notification.
2154*4882a593Smuzhiyun 	 */
2155*4882a593Smuzhiyun 	ioc->cbfn->hbfail_cbfn(ioc->bfa);
2156*4882a593Smuzhiyun 	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
2157*4882a593Smuzhiyun 
2158*4882a593Smuzhiyun 	bfa_ioc_debug_save_ftrc(ioc);
2159*4882a593Smuzhiyun 
2160*4882a593Smuzhiyun 	BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
2161*4882a593Smuzhiyun 		"Heart Beat of IOC has failed\n");
2162*4882a593Smuzhiyun 	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
2163*4882a593Smuzhiyun 
2164*4882a593Smuzhiyun }
2165*4882a593Smuzhiyun 
2166*4882a593Smuzhiyun static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s * ioc)2167*4882a593Smuzhiyun bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
2168*4882a593Smuzhiyun {
2169*4882a593Smuzhiyun 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2170*4882a593Smuzhiyun 	/*
2171*4882a593Smuzhiyun 	 * Provide enable completion callback.
2172*4882a593Smuzhiyun 	 */
2173*4882a593Smuzhiyun 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
2174*4882a593Smuzhiyun 	BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
2175*4882a593Smuzhiyun 		"Running firmware version is incompatible "
2176*4882a593Smuzhiyun 		"with the driver version\n");
2177*4882a593Smuzhiyun 	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
2178*4882a593Smuzhiyun }
2179*4882a593Smuzhiyun 
2180*4882a593Smuzhiyun bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s * ioc)2181*4882a593Smuzhiyun bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
2182*4882a593Smuzhiyun {
2183*4882a593Smuzhiyun 
2184*4882a593Smuzhiyun 	/*
2185*4882a593Smuzhiyun 	 *  Hold semaphore so that nobody can access the chip during init.
2186*4882a593Smuzhiyun 	 */
2187*4882a593Smuzhiyun 	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
2188*4882a593Smuzhiyun 
2189*4882a593Smuzhiyun 	bfa_ioc_pll_init_asic(ioc);
2190*4882a593Smuzhiyun 
2191*4882a593Smuzhiyun 	ioc->pllinit = BFA_TRUE;
2192*4882a593Smuzhiyun 
2193*4882a593Smuzhiyun 	/*
2194*4882a593Smuzhiyun 	 * Initialize LMEM
2195*4882a593Smuzhiyun 	 */
2196*4882a593Smuzhiyun 	bfa_ioc_lmem_init(ioc);
2197*4882a593Smuzhiyun 
2198*4882a593Smuzhiyun 	/*
2199*4882a593Smuzhiyun 	 *  release semaphore.
2200*4882a593Smuzhiyun 	 */
2201*4882a593Smuzhiyun 	readl(ioc->ioc_regs.ioc_init_sem_reg);
2202*4882a593Smuzhiyun 	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2203*4882a593Smuzhiyun 
2204*4882a593Smuzhiyun 	return BFA_STATUS_OK;
2205*4882a593Smuzhiyun }
2206*4882a593Smuzhiyun 
2207*4882a593Smuzhiyun /*
2208*4882a593Smuzhiyun  * Interface used by diag module to do firmware boot with memory test
2209*4882a593Smuzhiyun  * as the entry vector.
2210*4882a593Smuzhiyun  */
2211*4882a593Smuzhiyun bfa_status_t
bfa_ioc_boot(struct bfa_ioc_s * ioc,u32 boot_type,u32 boot_env)2212*4882a593Smuzhiyun bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
2213*4882a593Smuzhiyun {
2214*4882a593Smuzhiyun 	struct bfi_ioc_image_hdr_s *drv_fwhdr;
2215*4882a593Smuzhiyun 	bfa_status_t status;
2216*4882a593Smuzhiyun 	bfa_ioc_stats(ioc, ioc_boots);
2217*4882a593Smuzhiyun 
2218*4882a593Smuzhiyun 	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2219*4882a593Smuzhiyun 		return BFA_STATUS_FAILED;
2220*4882a593Smuzhiyun 
2221*4882a593Smuzhiyun 	if (boot_env == BFI_FWBOOT_ENV_OS &&
2222*4882a593Smuzhiyun 		boot_type == BFI_FWBOOT_TYPE_NORMAL) {
2223*4882a593Smuzhiyun 
2224*4882a593Smuzhiyun 		drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
2225*4882a593Smuzhiyun 			bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
2226*4882a593Smuzhiyun 
2227*4882a593Smuzhiyun 		/*
2228*4882a593Smuzhiyun 		 * Work with Flash iff flash f/w is better than driver f/w.
2229*4882a593Smuzhiyun 		 * Otherwise push drivers firmware.
2230*4882a593Smuzhiyun 		 */
2231*4882a593Smuzhiyun 		if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
2232*4882a593Smuzhiyun 						BFI_IOC_IMG_VER_BETTER)
2233*4882a593Smuzhiyun 			boot_type = BFI_FWBOOT_TYPE_FLASH;
2234*4882a593Smuzhiyun 	}
2235*4882a593Smuzhiyun 
2236*4882a593Smuzhiyun 	/*
2237*4882a593Smuzhiyun 	 * Initialize IOC state of all functions on a chip reset.
2238*4882a593Smuzhiyun 	 */
2239*4882a593Smuzhiyun 	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2240*4882a593Smuzhiyun 		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2241*4882a593Smuzhiyun 		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2242*4882a593Smuzhiyun 	} else {
2243*4882a593Smuzhiyun 		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
2244*4882a593Smuzhiyun 		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
2245*4882a593Smuzhiyun 	}
2246*4882a593Smuzhiyun 
2247*4882a593Smuzhiyun 	bfa_ioc_msgflush(ioc);
2248*4882a593Smuzhiyun 	status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
2249*4882a593Smuzhiyun 	if (status == BFA_STATUS_OK)
2250*4882a593Smuzhiyun 		bfa_ioc_lpu_start(ioc);
2251*4882a593Smuzhiyun 	else {
2252*4882a593Smuzhiyun 		WARN_ON(boot_type == BFI_FWBOOT_TYPE_MEMTEST);
2253*4882a593Smuzhiyun 		bfa_iocpf_timeout(ioc);
2254*4882a593Smuzhiyun 	}
2255*4882a593Smuzhiyun 	return status;
2256*4882a593Smuzhiyun }
2257*4882a593Smuzhiyun 
2258*4882a593Smuzhiyun /*
2259*4882a593Smuzhiyun  * Enable/disable IOC failure auto recovery.
2260*4882a593Smuzhiyun  */
2261*4882a593Smuzhiyun void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)2262*4882a593Smuzhiyun bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2263*4882a593Smuzhiyun {
2264*4882a593Smuzhiyun 	bfa_auto_recover = auto_recover;
2265*4882a593Smuzhiyun }
2266*4882a593Smuzhiyun 
2267*4882a593Smuzhiyun 
2268*4882a593Smuzhiyun 
2269*4882a593Smuzhiyun bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s * ioc)2270*4882a593Smuzhiyun bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2271*4882a593Smuzhiyun {
2272*4882a593Smuzhiyun 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2273*4882a593Smuzhiyun }
2274*4882a593Smuzhiyun 
2275*4882a593Smuzhiyun bfa_boolean_t
bfa_ioc_is_initialized(struct bfa_ioc_s * ioc)2276*4882a593Smuzhiyun bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2277*4882a593Smuzhiyun {
2278*4882a593Smuzhiyun 	u32 r32 = bfa_ioc_get_cur_ioc_fwstate(ioc);
2279*4882a593Smuzhiyun 
2280*4882a593Smuzhiyun 	return ((r32 != BFI_IOC_UNINIT) &&
2281*4882a593Smuzhiyun 		(r32 != BFI_IOC_INITING) &&
2282*4882a593Smuzhiyun 		(r32 != BFI_IOC_MEMTEST));
2283*4882a593Smuzhiyun }
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun bfa_boolean_t
bfa_ioc_msgget(struct bfa_ioc_s * ioc,void * mbmsg)2286*4882a593Smuzhiyun bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2287*4882a593Smuzhiyun {
2288*4882a593Smuzhiyun 	__be32	*msgp = mbmsg;
2289*4882a593Smuzhiyun 	u32	r32;
2290*4882a593Smuzhiyun 	int		i;
2291*4882a593Smuzhiyun 
2292*4882a593Smuzhiyun 	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2293*4882a593Smuzhiyun 	if ((r32 & 1) == 0)
2294*4882a593Smuzhiyun 		return BFA_FALSE;
2295*4882a593Smuzhiyun 
2296*4882a593Smuzhiyun 	/*
2297*4882a593Smuzhiyun 	 * read the MBOX msg
2298*4882a593Smuzhiyun 	 */
2299*4882a593Smuzhiyun 	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2300*4882a593Smuzhiyun 	     i++) {
2301*4882a593Smuzhiyun 		r32 = readl(ioc->ioc_regs.lpu_mbox +
2302*4882a593Smuzhiyun 				   i * sizeof(u32));
2303*4882a593Smuzhiyun 		msgp[i] = cpu_to_be32(r32);
2304*4882a593Smuzhiyun 	}
2305*4882a593Smuzhiyun 
2306*4882a593Smuzhiyun 	/*
2307*4882a593Smuzhiyun 	 * turn off mailbox interrupt by clearing mailbox status
2308*4882a593Smuzhiyun 	 */
2309*4882a593Smuzhiyun 	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2310*4882a593Smuzhiyun 	readl(ioc->ioc_regs.lpu_mbox_cmd);
2311*4882a593Smuzhiyun 
2312*4882a593Smuzhiyun 	return BFA_TRUE;
2313*4882a593Smuzhiyun }
2314*4882a593Smuzhiyun 
2315*4882a593Smuzhiyun void
bfa_ioc_isr(struct bfa_ioc_s * ioc,struct bfi_mbmsg_s * m)2316*4882a593Smuzhiyun bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2317*4882a593Smuzhiyun {
2318*4882a593Smuzhiyun 	union bfi_ioc_i2h_msg_u	*msg;
2319*4882a593Smuzhiyun 	struct bfa_iocpf_s *iocpf = &ioc->iocpf;
2320*4882a593Smuzhiyun 
2321*4882a593Smuzhiyun 	msg = (union bfi_ioc_i2h_msg_u *) m;
2322*4882a593Smuzhiyun 
2323*4882a593Smuzhiyun 	bfa_ioc_stats(ioc, ioc_isrs);
2324*4882a593Smuzhiyun 
2325*4882a593Smuzhiyun 	switch (msg->mh.msg_id) {
2326*4882a593Smuzhiyun 	case BFI_IOC_I2H_HBEAT:
2327*4882a593Smuzhiyun 		break;
2328*4882a593Smuzhiyun 
2329*4882a593Smuzhiyun 	case BFI_IOC_I2H_ENABLE_REPLY:
2330*4882a593Smuzhiyun 		ioc->port_mode = ioc->port_mode_cfg =
2331*4882a593Smuzhiyun 				(enum bfa_mode_s)msg->fw_event.port_mode;
2332*4882a593Smuzhiyun 		ioc->ad_cap_bm = msg->fw_event.cap_bm;
2333*4882a593Smuzhiyun 		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2334*4882a593Smuzhiyun 		break;
2335*4882a593Smuzhiyun 
2336*4882a593Smuzhiyun 	case BFI_IOC_I2H_DISABLE_REPLY:
2337*4882a593Smuzhiyun 		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2338*4882a593Smuzhiyun 		break;
2339*4882a593Smuzhiyun 
2340*4882a593Smuzhiyun 	case BFI_IOC_I2H_GETATTR_REPLY:
2341*4882a593Smuzhiyun 		bfa_ioc_getattr_reply(ioc);
2342*4882a593Smuzhiyun 		break;
2343*4882a593Smuzhiyun 
2344*4882a593Smuzhiyun 	default:
2345*4882a593Smuzhiyun 		bfa_trc(ioc, msg->mh.msg_id);
2346*4882a593Smuzhiyun 		WARN_ON(1);
2347*4882a593Smuzhiyun 	}
2348*4882a593Smuzhiyun }
2349*4882a593Smuzhiyun 
2350*4882a593Smuzhiyun /*
2351*4882a593Smuzhiyun  * IOC attach time initialization and setup.
2352*4882a593Smuzhiyun  *
2353*4882a593Smuzhiyun  * @param[in]	ioc	memory for IOC
2354*4882a593Smuzhiyun  * @param[in]	bfa	driver instance structure
2355*4882a593Smuzhiyun  */
2356*4882a593Smuzhiyun void
bfa_ioc_attach(struct bfa_ioc_s * ioc,void * bfa,struct bfa_ioc_cbfn_s * cbfn,struct bfa_timer_mod_s * timer_mod)2357*4882a593Smuzhiyun bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
2358*4882a593Smuzhiyun 	       struct bfa_timer_mod_s *timer_mod)
2359*4882a593Smuzhiyun {
2360*4882a593Smuzhiyun 	ioc->bfa	= bfa;
2361*4882a593Smuzhiyun 	ioc->cbfn	= cbfn;
2362*4882a593Smuzhiyun 	ioc->timer_mod	= timer_mod;
2363*4882a593Smuzhiyun 	ioc->fcmode	= BFA_FALSE;
2364*4882a593Smuzhiyun 	ioc->pllinit	= BFA_FALSE;
2365*4882a593Smuzhiyun 	ioc->dbg_fwsave_once = BFA_TRUE;
2366*4882a593Smuzhiyun 	ioc->iocpf.ioc	= ioc;
2367*4882a593Smuzhiyun 
2368*4882a593Smuzhiyun 	bfa_ioc_mbox_attach(ioc);
2369*4882a593Smuzhiyun 	INIT_LIST_HEAD(&ioc->notify_q);
2370*4882a593Smuzhiyun 
2371*4882a593Smuzhiyun 	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2372*4882a593Smuzhiyun 	bfa_fsm_send_event(ioc, IOC_E_RESET);
2373*4882a593Smuzhiyun }
2374*4882a593Smuzhiyun 
2375*4882a593Smuzhiyun /*
2376*4882a593Smuzhiyun  * Driver detach time IOC cleanup.
2377*4882a593Smuzhiyun  */
2378*4882a593Smuzhiyun void
bfa_ioc_detach(struct bfa_ioc_s * ioc)2379*4882a593Smuzhiyun bfa_ioc_detach(struct bfa_ioc_s *ioc)
2380*4882a593Smuzhiyun {
2381*4882a593Smuzhiyun 	bfa_fsm_send_event(ioc, IOC_E_DETACH);
2382*4882a593Smuzhiyun 	INIT_LIST_HEAD(&ioc->notify_q);
2383*4882a593Smuzhiyun }
2384*4882a593Smuzhiyun 
2385*4882a593Smuzhiyun /*
2386*4882a593Smuzhiyun  * Setup IOC PCI properties.
2387*4882a593Smuzhiyun  *
2388*4882a593Smuzhiyun  * @param[in]	pcidev	PCI device information for this IOC
2389*4882a593Smuzhiyun  */
2390*4882a593Smuzhiyun void
bfa_ioc_pci_init(struct bfa_ioc_s * ioc,struct bfa_pcidev_s * pcidev,enum bfi_pcifn_class clscode)2391*4882a593Smuzhiyun bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
2392*4882a593Smuzhiyun 		enum bfi_pcifn_class clscode)
2393*4882a593Smuzhiyun {
2394*4882a593Smuzhiyun 	ioc->clscode	= clscode;
2395*4882a593Smuzhiyun 	ioc->pcidev	= *pcidev;
2396*4882a593Smuzhiyun 
2397*4882a593Smuzhiyun 	/*
2398*4882a593Smuzhiyun 	 * Initialize IOC and device personality
2399*4882a593Smuzhiyun 	 */
2400*4882a593Smuzhiyun 	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2401*4882a593Smuzhiyun 	ioc->asic_mode  = BFI_ASIC_MODE_FC;
2402*4882a593Smuzhiyun 
2403*4882a593Smuzhiyun 	switch (pcidev->device_id) {
2404*4882a593Smuzhiyun 	case BFA_PCI_DEVICE_ID_FC_8G1P:
2405*4882a593Smuzhiyun 	case BFA_PCI_DEVICE_ID_FC_8G2P:
2406*4882a593Smuzhiyun 		ioc->asic_gen = BFI_ASIC_GEN_CB;
2407*4882a593Smuzhiyun 		ioc->fcmode = BFA_TRUE;
2408*4882a593Smuzhiyun 		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2409*4882a593Smuzhiyun 		ioc->ad_cap_bm = BFA_CM_HBA;
2410*4882a593Smuzhiyun 		break;
2411*4882a593Smuzhiyun 
2412*4882a593Smuzhiyun 	case BFA_PCI_DEVICE_ID_CT:
2413*4882a593Smuzhiyun 		ioc->asic_gen = BFI_ASIC_GEN_CT;
2414*4882a593Smuzhiyun 		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2415*4882a593Smuzhiyun 		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2416*4882a593Smuzhiyun 		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2417*4882a593Smuzhiyun 		ioc->ad_cap_bm = BFA_CM_CNA;
2418*4882a593Smuzhiyun 		break;
2419*4882a593Smuzhiyun 
2420*4882a593Smuzhiyun 	case BFA_PCI_DEVICE_ID_CT_FC:
2421*4882a593Smuzhiyun 		ioc->asic_gen = BFI_ASIC_GEN_CT;
2422*4882a593Smuzhiyun 		ioc->fcmode = BFA_TRUE;
2423*4882a593Smuzhiyun 		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2424*4882a593Smuzhiyun 		ioc->ad_cap_bm = BFA_CM_HBA;
2425*4882a593Smuzhiyun 		break;
2426*4882a593Smuzhiyun 
2427*4882a593Smuzhiyun 	case BFA_PCI_DEVICE_ID_CT2:
2428*4882a593Smuzhiyun 	case BFA_PCI_DEVICE_ID_CT2_QUAD:
2429*4882a593Smuzhiyun 		ioc->asic_gen = BFI_ASIC_GEN_CT2;
2430*4882a593Smuzhiyun 		if (clscode == BFI_PCIFN_CLASS_FC &&
2431*4882a593Smuzhiyun 		    pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2432*4882a593Smuzhiyun 			ioc->asic_mode  = BFI_ASIC_MODE_FC16;
2433*4882a593Smuzhiyun 			ioc->fcmode = BFA_TRUE;
2434*4882a593Smuzhiyun 			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2435*4882a593Smuzhiyun 			ioc->ad_cap_bm = BFA_CM_HBA;
2436*4882a593Smuzhiyun 		} else {
2437*4882a593Smuzhiyun 			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2438*4882a593Smuzhiyun 			ioc->asic_mode  = BFI_ASIC_MODE_ETH;
2439*4882a593Smuzhiyun 			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2440*4882a593Smuzhiyun 				ioc->port_mode =
2441*4882a593Smuzhiyun 				ioc->port_mode_cfg = BFA_MODE_CNA;
2442*4882a593Smuzhiyun 				ioc->ad_cap_bm = BFA_CM_CNA;
2443*4882a593Smuzhiyun 			} else {
2444*4882a593Smuzhiyun 				ioc->port_mode =
2445*4882a593Smuzhiyun 				ioc->port_mode_cfg = BFA_MODE_NIC;
2446*4882a593Smuzhiyun 				ioc->ad_cap_bm = BFA_CM_NIC;
2447*4882a593Smuzhiyun 			}
2448*4882a593Smuzhiyun 		}
2449*4882a593Smuzhiyun 		break;
2450*4882a593Smuzhiyun 
2451*4882a593Smuzhiyun 	default:
2452*4882a593Smuzhiyun 		WARN_ON(1);
2453*4882a593Smuzhiyun 	}
2454*4882a593Smuzhiyun 
2455*4882a593Smuzhiyun 	/*
2456*4882a593Smuzhiyun 	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2457*4882a593Smuzhiyun 	 */
2458*4882a593Smuzhiyun 	if (ioc->asic_gen == BFI_ASIC_GEN_CB)
2459*4882a593Smuzhiyun 		bfa_ioc_set_cb_hwif(ioc);
2460*4882a593Smuzhiyun 	else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2461*4882a593Smuzhiyun 		bfa_ioc_set_ct_hwif(ioc);
2462*4882a593Smuzhiyun 	else {
2463*4882a593Smuzhiyun 		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2464*4882a593Smuzhiyun 		bfa_ioc_set_ct2_hwif(ioc);
2465*4882a593Smuzhiyun 		bfa_ioc_ct2_poweron(ioc);
2466*4882a593Smuzhiyun 	}
2467*4882a593Smuzhiyun 
2468*4882a593Smuzhiyun 	bfa_ioc_map_port(ioc);
2469*4882a593Smuzhiyun 	bfa_ioc_reg_init(ioc);
2470*4882a593Smuzhiyun }
2471*4882a593Smuzhiyun 
2472*4882a593Smuzhiyun /*
2473*4882a593Smuzhiyun  * Initialize IOC dma memory
2474*4882a593Smuzhiyun  *
2475*4882a593Smuzhiyun  * @param[in]	dm_kva	kernel virtual address of IOC dma memory
2476*4882a593Smuzhiyun  * @param[in]	dm_pa	physical address of IOC dma memory
2477*4882a593Smuzhiyun  */
2478*4882a593Smuzhiyun void
bfa_ioc_mem_claim(struct bfa_ioc_s * ioc,u8 * dm_kva,u64 dm_pa)2479*4882a593Smuzhiyun bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
2480*4882a593Smuzhiyun {
2481*4882a593Smuzhiyun 	/*
2482*4882a593Smuzhiyun 	 * dma memory for firmware attribute
2483*4882a593Smuzhiyun 	 */
2484*4882a593Smuzhiyun 	ioc->attr_dma.kva = dm_kva;
2485*4882a593Smuzhiyun 	ioc->attr_dma.pa = dm_pa;
2486*4882a593Smuzhiyun 	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
2487*4882a593Smuzhiyun }
2488*4882a593Smuzhiyun 
2489*4882a593Smuzhiyun void
bfa_ioc_enable(struct bfa_ioc_s * ioc)2490*4882a593Smuzhiyun bfa_ioc_enable(struct bfa_ioc_s *ioc)
2491*4882a593Smuzhiyun {
2492*4882a593Smuzhiyun 	bfa_ioc_stats(ioc, ioc_enables);
2493*4882a593Smuzhiyun 	ioc->dbg_fwsave_once = BFA_TRUE;
2494*4882a593Smuzhiyun 
2495*4882a593Smuzhiyun 	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2496*4882a593Smuzhiyun }
2497*4882a593Smuzhiyun 
2498*4882a593Smuzhiyun void
bfa_ioc_disable(struct bfa_ioc_s * ioc)2499*4882a593Smuzhiyun bfa_ioc_disable(struct bfa_ioc_s *ioc)
2500*4882a593Smuzhiyun {
2501*4882a593Smuzhiyun 	bfa_ioc_stats(ioc, ioc_disables);
2502*4882a593Smuzhiyun 	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2503*4882a593Smuzhiyun }
2504*4882a593Smuzhiyun 
2505*4882a593Smuzhiyun void
bfa_ioc_suspend(struct bfa_ioc_s * ioc)2506*4882a593Smuzhiyun bfa_ioc_suspend(struct bfa_ioc_s *ioc)
2507*4882a593Smuzhiyun {
2508*4882a593Smuzhiyun 	ioc->dbg_fwsave_once = BFA_TRUE;
2509*4882a593Smuzhiyun 	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2510*4882a593Smuzhiyun }
2511*4882a593Smuzhiyun 
2512*4882a593Smuzhiyun /*
2513*4882a593Smuzhiyun  * Initialize memory for saving firmware trace. Driver must initialize
2514*4882a593Smuzhiyun  * trace memory before call bfa_ioc_enable().
2515*4882a593Smuzhiyun  */
2516*4882a593Smuzhiyun void
bfa_ioc_debug_memclaim(struct bfa_ioc_s * ioc,void * dbg_fwsave)2517*4882a593Smuzhiyun bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2518*4882a593Smuzhiyun {
2519*4882a593Smuzhiyun 	ioc->dbg_fwsave	    = dbg_fwsave;
2520*4882a593Smuzhiyun 	ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
2521*4882a593Smuzhiyun }
2522*4882a593Smuzhiyun 
2523*4882a593Smuzhiyun /*
2524*4882a593Smuzhiyun  * Register mailbox message handler functions
2525*4882a593Smuzhiyun  *
2526*4882a593Smuzhiyun  * @param[in]	ioc		IOC instance
2527*4882a593Smuzhiyun  * @param[in]	mcfuncs		message class handler functions
2528*4882a593Smuzhiyun  */
2529*4882a593Smuzhiyun void
bfa_ioc_mbox_register(struct bfa_ioc_s * ioc,bfa_ioc_mbox_mcfunc_t * mcfuncs)2530*4882a593Smuzhiyun bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2531*4882a593Smuzhiyun {
2532*4882a593Smuzhiyun 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2533*4882a593Smuzhiyun 	int				mc;
2534*4882a593Smuzhiyun 
2535*4882a593Smuzhiyun 	for (mc = 0; mc < BFI_MC_MAX; mc++)
2536*4882a593Smuzhiyun 		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2537*4882a593Smuzhiyun }
2538*4882a593Smuzhiyun 
2539*4882a593Smuzhiyun /*
2540*4882a593Smuzhiyun  * Register mailbox message handler function, to be called by common modules
2541*4882a593Smuzhiyun  */
2542*4882a593Smuzhiyun void
bfa_ioc_mbox_regisr(struct bfa_ioc_s * ioc,enum bfi_mclass mc,bfa_ioc_mbox_mcfunc_t cbfn,void * cbarg)2543*4882a593Smuzhiyun bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2544*4882a593Smuzhiyun 		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2545*4882a593Smuzhiyun {
2546*4882a593Smuzhiyun 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2547*4882a593Smuzhiyun 
2548*4882a593Smuzhiyun 	mod->mbhdlr[mc].cbfn	= cbfn;
2549*4882a593Smuzhiyun 	mod->mbhdlr[mc].cbarg	= cbarg;
2550*4882a593Smuzhiyun }
2551*4882a593Smuzhiyun 
2552*4882a593Smuzhiyun /*
2553*4882a593Smuzhiyun  * Queue a mailbox command request to firmware. Waits if mailbox is busy.
2554*4882a593Smuzhiyun  * Responsibility of caller to serialize
2555*4882a593Smuzhiyun  *
2556*4882a593Smuzhiyun  * @param[in]	ioc	IOC instance
2557*4882a593Smuzhiyun  * @param[i]	cmd	Mailbox command
2558*4882a593Smuzhiyun  */
2559*4882a593Smuzhiyun void
bfa_ioc_mbox_queue(struct bfa_ioc_s * ioc,struct bfa_mbox_cmd_s * cmd)2560*4882a593Smuzhiyun bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2561*4882a593Smuzhiyun {
2562*4882a593Smuzhiyun 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2563*4882a593Smuzhiyun 	u32			stat;
2564*4882a593Smuzhiyun 
2565*4882a593Smuzhiyun 	/*
2566*4882a593Smuzhiyun 	 * If a previous command is pending, queue new command
2567*4882a593Smuzhiyun 	 */
2568*4882a593Smuzhiyun 	if (!list_empty(&mod->cmd_q)) {
2569*4882a593Smuzhiyun 		list_add_tail(&cmd->qe, &mod->cmd_q);
2570*4882a593Smuzhiyun 		return;
2571*4882a593Smuzhiyun 	}
2572*4882a593Smuzhiyun 
2573*4882a593Smuzhiyun 	/*
2574*4882a593Smuzhiyun 	 * If mailbox is busy, queue command for poll timer
2575*4882a593Smuzhiyun 	 */
2576*4882a593Smuzhiyun 	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2577*4882a593Smuzhiyun 	if (stat) {
2578*4882a593Smuzhiyun 		list_add_tail(&cmd->qe, &mod->cmd_q);
2579*4882a593Smuzhiyun 		return;
2580*4882a593Smuzhiyun 	}
2581*4882a593Smuzhiyun 
2582*4882a593Smuzhiyun 	/*
2583*4882a593Smuzhiyun 	 * mailbox is free -- queue command to firmware
2584*4882a593Smuzhiyun 	 */
2585*4882a593Smuzhiyun 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2586*4882a593Smuzhiyun }
2587*4882a593Smuzhiyun 
2588*4882a593Smuzhiyun /*
2589*4882a593Smuzhiyun  * Handle mailbox interrupts
2590*4882a593Smuzhiyun  */
2591*4882a593Smuzhiyun void
bfa_ioc_mbox_isr(struct bfa_ioc_s * ioc)2592*4882a593Smuzhiyun bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2593*4882a593Smuzhiyun {
2594*4882a593Smuzhiyun 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
2595*4882a593Smuzhiyun 	struct bfi_mbmsg_s		m;
2596*4882a593Smuzhiyun 	int				mc;
2597*4882a593Smuzhiyun 
2598*4882a593Smuzhiyun 	if (bfa_ioc_msgget(ioc, &m)) {
2599*4882a593Smuzhiyun 		/*
2600*4882a593Smuzhiyun 		 * Treat IOC message class as special.
2601*4882a593Smuzhiyun 		 */
2602*4882a593Smuzhiyun 		mc = m.mh.msg_class;
2603*4882a593Smuzhiyun 		if (mc == BFI_MC_IOC) {
2604*4882a593Smuzhiyun 			bfa_ioc_isr(ioc, &m);
2605*4882a593Smuzhiyun 			return;
2606*4882a593Smuzhiyun 		}
2607*4882a593Smuzhiyun 
2608*4882a593Smuzhiyun 		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2609*4882a593Smuzhiyun 			return;
2610*4882a593Smuzhiyun 
2611*4882a593Smuzhiyun 		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2612*4882a593Smuzhiyun 	}
2613*4882a593Smuzhiyun 
2614*4882a593Smuzhiyun 	bfa_ioc_lpu_read_stat(ioc);
2615*4882a593Smuzhiyun 
2616*4882a593Smuzhiyun 	/*
2617*4882a593Smuzhiyun 	 * Try to send pending mailbox commands
2618*4882a593Smuzhiyun 	 */
2619*4882a593Smuzhiyun 	bfa_ioc_mbox_poll(ioc);
2620*4882a593Smuzhiyun }
2621*4882a593Smuzhiyun 
2622*4882a593Smuzhiyun void
bfa_ioc_error_isr(struct bfa_ioc_s * ioc)2623*4882a593Smuzhiyun bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2624*4882a593Smuzhiyun {
2625*4882a593Smuzhiyun 	bfa_ioc_stats(ioc, ioc_hbfails);
2626*4882a593Smuzhiyun 	ioc->stats.hb_count = ioc->hb_count;
2627*4882a593Smuzhiyun 	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2628*4882a593Smuzhiyun }
2629*4882a593Smuzhiyun 
2630*4882a593Smuzhiyun /*
2631*4882a593Smuzhiyun  * return true if IOC is disabled
2632*4882a593Smuzhiyun  */
2633*4882a593Smuzhiyun bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s * ioc)2634*4882a593Smuzhiyun bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2635*4882a593Smuzhiyun {
2636*4882a593Smuzhiyun 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2637*4882a593Smuzhiyun 		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
2638*4882a593Smuzhiyun }
2639*4882a593Smuzhiyun 
2640*4882a593Smuzhiyun /*
2641*4882a593Smuzhiyun  * return true if IOC firmware is different.
2642*4882a593Smuzhiyun  */
2643*4882a593Smuzhiyun bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s * ioc)2644*4882a593Smuzhiyun bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2645*4882a593Smuzhiyun {
2646*4882a593Smuzhiyun 	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2647*4882a593Smuzhiyun 		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2648*4882a593Smuzhiyun 		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
2649*4882a593Smuzhiyun }
2650*4882a593Smuzhiyun 
2651*4882a593Smuzhiyun /*
2652*4882a593Smuzhiyun  * Check if adapter is disabled -- both IOCs should be in a disabled
2653*4882a593Smuzhiyun  * state.
2654*4882a593Smuzhiyun  */
2655*4882a593Smuzhiyun bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s * ioc)2656*4882a593Smuzhiyun bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2657*4882a593Smuzhiyun {
2658*4882a593Smuzhiyun 	u32	ioc_state;
2659*4882a593Smuzhiyun 
2660*4882a593Smuzhiyun 	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2661*4882a593Smuzhiyun 		return BFA_FALSE;
2662*4882a593Smuzhiyun 
2663*4882a593Smuzhiyun 	ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
2664*4882a593Smuzhiyun 	if (!bfa_ioc_state_disabled(ioc_state))
2665*4882a593Smuzhiyun 		return BFA_FALSE;
2666*4882a593Smuzhiyun 
2667*4882a593Smuzhiyun 	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
2668*4882a593Smuzhiyun 		ioc_state = bfa_ioc_get_cur_ioc_fwstate(ioc);
2669*4882a593Smuzhiyun 		if (!bfa_ioc_state_disabled(ioc_state))
2670*4882a593Smuzhiyun 			return BFA_FALSE;
2671*4882a593Smuzhiyun 	}
2672*4882a593Smuzhiyun 
2673*4882a593Smuzhiyun 	return BFA_TRUE;
2674*4882a593Smuzhiyun }
2675*4882a593Smuzhiyun 
2676*4882a593Smuzhiyun /*
2677*4882a593Smuzhiyun  * Reset IOC fwstate registers.
2678*4882a593Smuzhiyun  */
2679*4882a593Smuzhiyun void
bfa_ioc_reset_fwstate(struct bfa_ioc_s * ioc)2680*4882a593Smuzhiyun bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2681*4882a593Smuzhiyun {
2682*4882a593Smuzhiyun 	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2683*4882a593Smuzhiyun 	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
2684*4882a593Smuzhiyun }
2685*4882a593Smuzhiyun 
2686*4882a593Smuzhiyun #define BFA_MFG_NAME "QLogic"
2687*4882a593Smuzhiyun void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s * ioc,struct bfa_adapter_attr_s * ad_attr)2688*4882a593Smuzhiyun bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2689*4882a593Smuzhiyun 			 struct bfa_adapter_attr_s *ad_attr)
2690*4882a593Smuzhiyun {
2691*4882a593Smuzhiyun 	struct bfi_ioc_attr_s	*ioc_attr;
2692*4882a593Smuzhiyun 
2693*4882a593Smuzhiyun 	ioc_attr = ioc->attr;
2694*4882a593Smuzhiyun 
2695*4882a593Smuzhiyun 	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2696*4882a593Smuzhiyun 	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2697*4882a593Smuzhiyun 	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2698*4882a593Smuzhiyun 	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2699*4882a593Smuzhiyun 	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2700*4882a593Smuzhiyun 		      sizeof(struct bfa_mfg_vpd_s));
2701*4882a593Smuzhiyun 
2702*4882a593Smuzhiyun 	ad_attr->nports = bfa_ioc_get_nports(ioc);
2703*4882a593Smuzhiyun 	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2704*4882a593Smuzhiyun 
2705*4882a593Smuzhiyun 	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2706*4882a593Smuzhiyun 	/* For now, model descr uses same model string */
2707*4882a593Smuzhiyun 	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2708*4882a593Smuzhiyun 
2709*4882a593Smuzhiyun 	ad_attr->card_type = ioc_attr->card_type;
2710*4882a593Smuzhiyun 	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2711*4882a593Smuzhiyun 
2712*4882a593Smuzhiyun 	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2713*4882a593Smuzhiyun 		ad_attr->prototype = 1;
2714*4882a593Smuzhiyun 	else
2715*4882a593Smuzhiyun 		ad_attr->prototype = 0;
2716*4882a593Smuzhiyun 
2717*4882a593Smuzhiyun 	ad_attr->pwwn = ioc->attr->pwwn;
2718*4882a593Smuzhiyun 	ad_attr->mac  = bfa_ioc_get_mac(ioc);
2719*4882a593Smuzhiyun 
2720*4882a593Smuzhiyun 	ad_attr->pcie_gen = ioc_attr->pcie_gen;
2721*4882a593Smuzhiyun 	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2722*4882a593Smuzhiyun 	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2723*4882a593Smuzhiyun 	ad_attr->asic_rev = ioc_attr->asic_rev;
2724*4882a593Smuzhiyun 
2725*4882a593Smuzhiyun 	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2726*4882a593Smuzhiyun 
2727*4882a593Smuzhiyun 	ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2728*4882a593Smuzhiyun 	ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2729*4882a593Smuzhiyun 				  !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
2730*4882a593Smuzhiyun 	ad_attr->mfg_day = ioc_attr->mfg_day;
2731*4882a593Smuzhiyun 	ad_attr->mfg_month = ioc_attr->mfg_month;
2732*4882a593Smuzhiyun 	ad_attr->mfg_year = ioc_attr->mfg_year;
2733*4882a593Smuzhiyun 	memcpy(ad_attr->uuid, ioc_attr->uuid, BFA_ADAPTER_UUID_LEN);
2734*4882a593Smuzhiyun }
2735*4882a593Smuzhiyun 
2736*4882a593Smuzhiyun enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s * ioc)2737*4882a593Smuzhiyun bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2738*4882a593Smuzhiyun {
2739*4882a593Smuzhiyun 	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
2740*4882a593Smuzhiyun 		return BFA_IOC_TYPE_LL;
2741*4882a593Smuzhiyun 
2742*4882a593Smuzhiyun 	WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2743*4882a593Smuzhiyun 
2744*4882a593Smuzhiyun 	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
2745*4882a593Smuzhiyun 		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
2746*4882a593Smuzhiyun }
2747*4882a593Smuzhiyun 
2748*4882a593Smuzhiyun void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s * ioc,char * serial_num)2749*4882a593Smuzhiyun bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2750*4882a593Smuzhiyun {
2751*4882a593Smuzhiyun 	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2752*4882a593Smuzhiyun 	memcpy((void *)serial_num,
2753*4882a593Smuzhiyun 			(void *)ioc->attr->brcd_serialnum,
2754*4882a593Smuzhiyun 			BFA_ADAPTER_SERIAL_NUM_LEN);
2755*4882a593Smuzhiyun }
2756*4882a593Smuzhiyun 
2757*4882a593Smuzhiyun void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s * ioc,char * fw_ver)2758*4882a593Smuzhiyun bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2759*4882a593Smuzhiyun {
2760*4882a593Smuzhiyun 	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2761*4882a593Smuzhiyun 	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2762*4882a593Smuzhiyun }
2763*4882a593Smuzhiyun 
2764*4882a593Smuzhiyun void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s * ioc,char * chip_rev)2765*4882a593Smuzhiyun bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2766*4882a593Smuzhiyun {
2767*4882a593Smuzhiyun 	WARN_ON(!chip_rev);
2768*4882a593Smuzhiyun 
2769*4882a593Smuzhiyun 	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2770*4882a593Smuzhiyun 
2771*4882a593Smuzhiyun 	chip_rev[0] = 'R';
2772*4882a593Smuzhiyun 	chip_rev[1] = 'e';
2773*4882a593Smuzhiyun 	chip_rev[2] = 'v';
2774*4882a593Smuzhiyun 	chip_rev[3] = '-';
2775*4882a593Smuzhiyun 	chip_rev[4] = ioc->attr->asic_rev;
2776*4882a593Smuzhiyun 	chip_rev[5] = '\0';
2777*4882a593Smuzhiyun }
2778*4882a593Smuzhiyun 
2779*4882a593Smuzhiyun void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s * ioc,char * optrom_ver)2780*4882a593Smuzhiyun bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2781*4882a593Smuzhiyun {
2782*4882a593Smuzhiyun 	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2783*4882a593Smuzhiyun 	memcpy(optrom_ver, ioc->attr->optrom_version,
2784*4882a593Smuzhiyun 		      BFA_VERSION_LEN);
2785*4882a593Smuzhiyun }
2786*4882a593Smuzhiyun 
2787*4882a593Smuzhiyun void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s * ioc,char * manufacturer)2788*4882a593Smuzhiyun bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2789*4882a593Smuzhiyun {
2790*4882a593Smuzhiyun 	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2791*4882a593Smuzhiyun 	strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2792*4882a593Smuzhiyun }
2793*4882a593Smuzhiyun 
2794*4882a593Smuzhiyun void
bfa_ioc_get_adapter_model(struct bfa_ioc_s * ioc,char * model)2795*4882a593Smuzhiyun bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2796*4882a593Smuzhiyun {
2797*4882a593Smuzhiyun 	struct bfi_ioc_attr_s	*ioc_attr;
2798*4882a593Smuzhiyun 	u8 nports = bfa_ioc_get_nports(ioc);
2799*4882a593Smuzhiyun 
2800*4882a593Smuzhiyun 	WARN_ON(!model);
2801*4882a593Smuzhiyun 	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2802*4882a593Smuzhiyun 
2803*4882a593Smuzhiyun 	ioc_attr = ioc->attr;
2804*4882a593Smuzhiyun 
2805*4882a593Smuzhiyun 	if (bfa_asic_id_ct2(ioc->pcidev.device_id) &&
2806*4882a593Smuzhiyun 		(!bfa_mfg_is_mezz(ioc_attr->card_type)))
2807*4882a593Smuzhiyun 		snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u-%u%s",
2808*4882a593Smuzhiyun 			BFA_MFG_NAME, ioc_attr->card_type, nports, "p");
2809*4882a593Smuzhiyun 	else
2810*4882a593Smuzhiyun 		snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2811*4882a593Smuzhiyun 			BFA_MFG_NAME, ioc_attr->card_type);
2812*4882a593Smuzhiyun }
2813*4882a593Smuzhiyun 
2814*4882a593Smuzhiyun enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s * ioc)2815*4882a593Smuzhiyun bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2816*4882a593Smuzhiyun {
2817*4882a593Smuzhiyun 	enum bfa_iocpf_state iocpf_st;
2818*4882a593Smuzhiyun 	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2819*4882a593Smuzhiyun 
2820*4882a593Smuzhiyun 	if (ioc_st == BFA_IOC_ENABLING ||
2821*4882a593Smuzhiyun 		ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2822*4882a593Smuzhiyun 
2823*4882a593Smuzhiyun 		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2824*4882a593Smuzhiyun 
2825*4882a593Smuzhiyun 		switch (iocpf_st) {
2826*4882a593Smuzhiyun 		case BFA_IOCPF_SEMWAIT:
2827*4882a593Smuzhiyun 			ioc_st = BFA_IOC_SEMWAIT;
2828*4882a593Smuzhiyun 			break;
2829*4882a593Smuzhiyun 
2830*4882a593Smuzhiyun 		case BFA_IOCPF_HWINIT:
2831*4882a593Smuzhiyun 			ioc_st = BFA_IOC_HWINIT;
2832*4882a593Smuzhiyun 			break;
2833*4882a593Smuzhiyun 
2834*4882a593Smuzhiyun 		case BFA_IOCPF_FWMISMATCH:
2835*4882a593Smuzhiyun 			ioc_st = BFA_IOC_FWMISMATCH;
2836*4882a593Smuzhiyun 			break;
2837*4882a593Smuzhiyun 
2838*4882a593Smuzhiyun 		case BFA_IOCPF_FAIL:
2839*4882a593Smuzhiyun 			ioc_st = BFA_IOC_FAIL;
2840*4882a593Smuzhiyun 			break;
2841*4882a593Smuzhiyun 
2842*4882a593Smuzhiyun 		case BFA_IOCPF_INITFAIL:
2843*4882a593Smuzhiyun 			ioc_st = BFA_IOC_INITFAIL;
2844*4882a593Smuzhiyun 			break;
2845*4882a593Smuzhiyun 
2846*4882a593Smuzhiyun 		default:
2847*4882a593Smuzhiyun 			break;
2848*4882a593Smuzhiyun 		}
2849*4882a593Smuzhiyun 	}
2850*4882a593Smuzhiyun 
2851*4882a593Smuzhiyun 	return ioc_st;
2852*4882a593Smuzhiyun }
2853*4882a593Smuzhiyun 
2854*4882a593Smuzhiyun void
bfa_ioc_get_attr(struct bfa_ioc_s * ioc,struct bfa_ioc_attr_s * ioc_attr)2855*4882a593Smuzhiyun bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2856*4882a593Smuzhiyun {
2857*4882a593Smuzhiyun 	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
2858*4882a593Smuzhiyun 
2859*4882a593Smuzhiyun 	ioc_attr->state = bfa_ioc_get_state(ioc);
2860*4882a593Smuzhiyun 	ioc_attr->port_id = bfa_ioc_portid(ioc);
2861*4882a593Smuzhiyun 	ioc_attr->port_mode = ioc->port_mode;
2862*4882a593Smuzhiyun 	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2863*4882a593Smuzhiyun 	ioc_attr->cap_bm = ioc->ad_cap_bm;
2864*4882a593Smuzhiyun 
2865*4882a593Smuzhiyun 	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2866*4882a593Smuzhiyun 
2867*4882a593Smuzhiyun 	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2868*4882a593Smuzhiyun 
2869*4882a593Smuzhiyun 	ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
2870*4882a593Smuzhiyun 	ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
2871*4882a593Smuzhiyun 	ioc_attr->def_fn = (bfa_ioc_pcifn(ioc) == bfa_ioc_portid(ioc));
2872*4882a593Smuzhiyun 	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2873*4882a593Smuzhiyun }
2874*4882a593Smuzhiyun 
2875*4882a593Smuzhiyun mac_t
bfa_ioc_get_mac(struct bfa_ioc_s * ioc)2876*4882a593Smuzhiyun bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2877*4882a593Smuzhiyun {
2878*4882a593Smuzhiyun 	/*
2879*4882a593Smuzhiyun 	 * Check the IOC type and return the appropriate MAC
2880*4882a593Smuzhiyun 	 */
2881*4882a593Smuzhiyun 	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
2882*4882a593Smuzhiyun 		return ioc->attr->fcoe_mac;
2883*4882a593Smuzhiyun 	else
2884*4882a593Smuzhiyun 		return ioc->attr->mac;
2885*4882a593Smuzhiyun }
2886*4882a593Smuzhiyun 
2887*4882a593Smuzhiyun mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s * ioc)2888*4882a593Smuzhiyun bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2889*4882a593Smuzhiyun {
2890*4882a593Smuzhiyun 	mac_t	m;
2891*4882a593Smuzhiyun 
2892*4882a593Smuzhiyun 	m = ioc->attr->mfg_mac;
2893*4882a593Smuzhiyun 	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2894*4882a593Smuzhiyun 		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2895*4882a593Smuzhiyun 	else
2896*4882a593Smuzhiyun 		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2897*4882a593Smuzhiyun 			bfa_ioc_pcifn(ioc));
2898*4882a593Smuzhiyun 
2899*4882a593Smuzhiyun 	return m;
2900*4882a593Smuzhiyun }
2901*4882a593Smuzhiyun 
2902*4882a593Smuzhiyun /*
2903*4882a593Smuzhiyun  * Send AEN notification
2904*4882a593Smuzhiyun  */
2905*4882a593Smuzhiyun void
bfa_ioc_aen_post(struct bfa_ioc_s * ioc,enum bfa_ioc_aen_event event)2906*4882a593Smuzhiyun bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2907*4882a593Smuzhiyun {
2908*4882a593Smuzhiyun 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2909*4882a593Smuzhiyun 	struct bfa_aen_entry_s	*aen_entry;
2910*4882a593Smuzhiyun 	enum bfa_ioc_type_e ioc_type;
2911*4882a593Smuzhiyun 
2912*4882a593Smuzhiyun 	bfad_get_aen_entry(bfad, aen_entry);
2913*4882a593Smuzhiyun 	if (!aen_entry)
2914*4882a593Smuzhiyun 		return;
2915*4882a593Smuzhiyun 
2916*4882a593Smuzhiyun 	ioc_type = bfa_ioc_get_type(ioc);
2917*4882a593Smuzhiyun 	switch (ioc_type) {
2918*4882a593Smuzhiyun 	case BFA_IOC_TYPE_FC:
2919*4882a593Smuzhiyun 		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2920*4882a593Smuzhiyun 		break;
2921*4882a593Smuzhiyun 	case BFA_IOC_TYPE_FCoE:
2922*4882a593Smuzhiyun 		aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2923*4882a593Smuzhiyun 		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2924*4882a593Smuzhiyun 		break;
2925*4882a593Smuzhiyun 	case BFA_IOC_TYPE_LL:
2926*4882a593Smuzhiyun 		aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2927*4882a593Smuzhiyun 		break;
2928*4882a593Smuzhiyun 	default:
2929*4882a593Smuzhiyun 		WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2930*4882a593Smuzhiyun 		break;
2931*4882a593Smuzhiyun 	}
2932*4882a593Smuzhiyun 
2933*4882a593Smuzhiyun 	/* Send the AEN notification */
2934*4882a593Smuzhiyun 	aen_entry->aen_data.ioc.ioc_type = ioc_type;
2935*4882a593Smuzhiyun 	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2936*4882a593Smuzhiyun 				  BFA_AEN_CAT_IOC, event);
2937*4882a593Smuzhiyun }
2938*4882a593Smuzhiyun 
2939*4882a593Smuzhiyun /*
2940*4882a593Smuzhiyun  * Retrieve saved firmware trace from a prior IOC failure.
2941*4882a593Smuzhiyun  */
2942*4882a593Smuzhiyun bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s * ioc,void * trcdata,int * trclen)2943*4882a593Smuzhiyun bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2944*4882a593Smuzhiyun {
2945*4882a593Smuzhiyun 	int	tlen;
2946*4882a593Smuzhiyun 
2947*4882a593Smuzhiyun 	if (ioc->dbg_fwsave_len == 0)
2948*4882a593Smuzhiyun 		return BFA_STATUS_ENOFSAVE;
2949*4882a593Smuzhiyun 
2950*4882a593Smuzhiyun 	tlen = *trclen;
2951*4882a593Smuzhiyun 	if (tlen > ioc->dbg_fwsave_len)
2952*4882a593Smuzhiyun 		tlen = ioc->dbg_fwsave_len;
2953*4882a593Smuzhiyun 
2954*4882a593Smuzhiyun 	memcpy(trcdata, ioc->dbg_fwsave, tlen);
2955*4882a593Smuzhiyun 	*trclen = tlen;
2956*4882a593Smuzhiyun 	return BFA_STATUS_OK;
2957*4882a593Smuzhiyun }
2958*4882a593Smuzhiyun 
2959*4882a593Smuzhiyun 
2960*4882a593Smuzhiyun /*
2961*4882a593Smuzhiyun  * Retrieve saved firmware trace from a prior IOC failure.
2962*4882a593Smuzhiyun  */
2963*4882a593Smuzhiyun bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s * ioc,void * trcdata,int * trclen)2964*4882a593Smuzhiyun bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2965*4882a593Smuzhiyun {
2966*4882a593Smuzhiyun 	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2967*4882a593Smuzhiyun 	int tlen;
2968*4882a593Smuzhiyun 	bfa_status_t status;
2969*4882a593Smuzhiyun 
2970*4882a593Smuzhiyun 	bfa_trc(ioc, *trclen);
2971*4882a593Smuzhiyun 
2972*4882a593Smuzhiyun 	tlen = *trclen;
2973*4882a593Smuzhiyun 	if (tlen > BFA_DBG_FWTRC_LEN)
2974*4882a593Smuzhiyun 		tlen = BFA_DBG_FWTRC_LEN;
2975*4882a593Smuzhiyun 
2976*4882a593Smuzhiyun 	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2977*4882a593Smuzhiyun 	*trclen = tlen;
2978*4882a593Smuzhiyun 	return status;
2979*4882a593Smuzhiyun }
2980*4882a593Smuzhiyun 
2981*4882a593Smuzhiyun static void
bfa_ioc_send_fwsync(struct bfa_ioc_s * ioc)2982*4882a593Smuzhiyun bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2983*4882a593Smuzhiyun {
2984*4882a593Smuzhiyun 	struct bfa_mbox_cmd_s cmd;
2985*4882a593Smuzhiyun 	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
2986*4882a593Smuzhiyun 
2987*4882a593Smuzhiyun 	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2988*4882a593Smuzhiyun 		    bfa_ioc_portid(ioc));
2989*4882a593Smuzhiyun 	req->clscode = cpu_to_be16(ioc->clscode);
2990*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(ioc, &cmd);
2991*4882a593Smuzhiyun }
2992*4882a593Smuzhiyun 
2993*4882a593Smuzhiyun static void
bfa_ioc_fwsync(struct bfa_ioc_s * ioc)2994*4882a593Smuzhiyun bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2995*4882a593Smuzhiyun {
2996*4882a593Smuzhiyun 	u32 fwsync_iter = 1000;
2997*4882a593Smuzhiyun 
2998*4882a593Smuzhiyun 	bfa_ioc_send_fwsync(ioc);
2999*4882a593Smuzhiyun 
3000*4882a593Smuzhiyun 	/*
3001*4882a593Smuzhiyun 	 * After sending a fw sync mbox command wait for it to
3002*4882a593Smuzhiyun 	 * take effect.  We will not wait for a response because
3003*4882a593Smuzhiyun 	 *    1. fw_sync mbox cmd doesn't have a response.
3004*4882a593Smuzhiyun 	 *    2. Even if we implement that,  interrupts might not
3005*4882a593Smuzhiyun 	 *	 be enabled when we call this function.
3006*4882a593Smuzhiyun 	 * So, just keep checking if any mbox cmd is pending, and
3007*4882a593Smuzhiyun 	 * after waiting for a reasonable amount of time, go ahead.
3008*4882a593Smuzhiyun 	 * It is possible that fw has crashed and the mbox command
3009*4882a593Smuzhiyun 	 * is never acknowledged.
3010*4882a593Smuzhiyun 	 */
3011*4882a593Smuzhiyun 	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
3012*4882a593Smuzhiyun 		fwsync_iter--;
3013*4882a593Smuzhiyun }
3014*4882a593Smuzhiyun 
3015*4882a593Smuzhiyun /*
3016*4882a593Smuzhiyun  * Dump firmware smem
3017*4882a593Smuzhiyun  */
3018*4882a593Smuzhiyun bfa_status_t
bfa_ioc_debug_fwcore(struct bfa_ioc_s * ioc,void * buf,u32 * offset,int * buflen)3019*4882a593Smuzhiyun bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
3020*4882a593Smuzhiyun 				u32 *offset, int *buflen)
3021*4882a593Smuzhiyun {
3022*4882a593Smuzhiyun 	u32 loff;
3023*4882a593Smuzhiyun 	int dlen;
3024*4882a593Smuzhiyun 	bfa_status_t status;
3025*4882a593Smuzhiyun 	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
3026*4882a593Smuzhiyun 
3027*4882a593Smuzhiyun 	if (*offset >= smem_len) {
3028*4882a593Smuzhiyun 		*offset = *buflen = 0;
3029*4882a593Smuzhiyun 		return BFA_STATUS_EINVAL;
3030*4882a593Smuzhiyun 	}
3031*4882a593Smuzhiyun 
3032*4882a593Smuzhiyun 	loff = *offset;
3033*4882a593Smuzhiyun 	dlen = *buflen;
3034*4882a593Smuzhiyun 
3035*4882a593Smuzhiyun 	/*
3036*4882a593Smuzhiyun 	 * First smem read, sync smem before proceeding
3037*4882a593Smuzhiyun 	 * No need to sync before reading every chunk.
3038*4882a593Smuzhiyun 	 */
3039*4882a593Smuzhiyun 	if (loff == 0)
3040*4882a593Smuzhiyun 		bfa_ioc_fwsync(ioc);
3041*4882a593Smuzhiyun 
3042*4882a593Smuzhiyun 	if ((loff + dlen) >= smem_len)
3043*4882a593Smuzhiyun 		dlen = smem_len - loff;
3044*4882a593Smuzhiyun 
3045*4882a593Smuzhiyun 	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
3046*4882a593Smuzhiyun 
3047*4882a593Smuzhiyun 	if (status != BFA_STATUS_OK) {
3048*4882a593Smuzhiyun 		*offset = *buflen = 0;
3049*4882a593Smuzhiyun 		return status;
3050*4882a593Smuzhiyun 	}
3051*4882a593Smuzhiyun 
3052*4882a593Smuzhiyun 	*offset += dlen;
3053*4882a593Smuzhiyun 
3054*4882a593Smuzhiyun 	if (*offset >= smem_len)
3055*4882a593Smuzhiyun 		*offset = 0;
3056*4882a593Smuzhiyun 
3057*4882a593Smuzhiyun 	*buflen = dlen;
3058*4882a593Smuzhiyun 
3059*4882a593Smuzhiyun 	return status;
3060*4882a593Smuzhiyun }
3061*4882a593Smuzhiyun 
3062*4882a593Smuzhiyun /*
3063*4882a593Smuzhiyun  * Firmware statistics
3064*4882a593Smuzhiyun  */
3065*4882a593Smuzhiyun bfa_status_t
bfa_ioc_fw_stats_get(struct bfa_ioc_s * ioc,void * stats)3066*4882a593Smuzhiyun bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
3067*4882a593Smuzhiyun {
3068*4882a593Smuzhiyun 	u32 loff = BFI_IOC_FWSTATS_OFF + \
3069*4882a593Smuzhiyun 		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3070*4882a593Smuzhiyun 	int tlen;
3071*4882a593Smuzhiyun 	bfa_status_t status;
3072*4882a593Smuzhiyun 
3073*4882a593Smuzhiyun 	if (ioc->stats_busy) {
3074*4882a593Smuzhiyun 		bfa_trc(ioc, ioc->stats_busy);
3075*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
3076*4882a593Smuzhiyun 	}
3077*4882a593Smuzhiyun 	ioc->stats_busy = BFA_TRUE;
3078*4882a593Smuzhiyun 
3079*4882a593Smuzhiyun 	tlen = sizeof(struct bfa_fw_stats_s);
3080*4882a593Smuzhiyun 	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
3081*4882a593Smuzhiyun 
3082*4882a593Smuzhiyun 	ioc->stats_busy = BFA_FALSE;
3083*4882a593Smuzhiyun 	return status;
3084*4882a593Smuzhiyun }
3085*4882a593Smuzhiyun 
3086*4882a593Smuzhiyun bfa_status_t
bfa_ioc_fw_stats_clear(struct bfa_ioc_s * ioc)3087*4882a593Smuzhiyun bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
3088*4882a593Smuzhiyun {
3089*4882a593Smuzhiyun 	u32 loff = BFI_IOC_FWSTATS_OFF + \
3090*4882a593Smuzhiyun 		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
3091*4882a593Smuzhiyun 	int tlen;
3092*4882a593Smuzhiyun 	bfa_status_t status;
3093*4882a593Smuzhiyun 
3094*4882a593Smuzhiyun 	if (ioc->stats_busy) {
3095*4882a593Smuzhiyun 		bfa_trc(ioc, ioc->stats_busy);
3096*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
3097*4882a593Smuzhiyun 	}
3098*4882a593Smuzhiyun 	ioc->stats_busy = BFA_TRUE;
3099*4882a593Smuzhiyun 
3100*4882a593Smuzhiyun 	tlen = sizeof(struct bfa_fw_stats_s);
3101*4882a593Smuzhiyun 	status = bfa_ioc_smem_clr(ioc, loff, tlen);
3102*4882a593Smuzhiyun 
3103*4882a593Smuzhiyun 	ioc->stats_busy = BFA_FALSE;
3104*4882a593Smuzhiyun 	return status;
3105*4882a593Smuzhiyun }
3106*4882a593Smuzhiyun 
3107*4882a593Smuzhiyun /*
3108*4882a593Smuzhiyun  * Save firmware trace if configured.
3109*4882a593Smuzhiyun  */
3110*4882a593Smuzhiyun void
bfa_ioc_debug_save_ftrc(struct bfa_ioc_s * ioc)3111*4882a593Smuzhiyun bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
3112*4882a593Smuzhiyun {
3113*4882a593Smuzhiyun 	int		tlen;
3114*4882a593Smuzhiyun 
3115*4882a593Smuzhiyun 	if (ioc->dbg_fwsave_once) {
3116*4882a593Smuzhiyun 		ioc->dbg_fwsave_once = BFA_FALSE;
3117*4882a593Smuzhiyun 		if (ioc->dbg_fwsave_len) {
3118*4882a593Smuzhiyun 			tlen = ioc->dbg_fwsave_len;
3119*4882a593Smuzhiyun 			bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
3120*4882a593Smuzhiyun 		}
3121*4882a593Smuzhiyun 	}
3122*4882a593Smuzhiyun }
3123*4882a593Smuzhiyun 
3124*4882a593Smuzhiyun /*
3125*4882a593Smuzhiyun  * Firmware failure detected. Start recovery actions.
3126*4882a593Smuzhiyun  */
3127*4882a593Smuzhiyun static void
bfa_ioc_recover(struct bfa_ioc_s * ioc)3128*4882a593Smuzhiyun bfa_ioc_recover(struct bfa_ioc_s *ioc)
3129*4882a593Smuzhiyun {
3130*4882a593Smuzhiyun 	bfa_ioc_stats(ioc, ioc_hbfails);
3131*4882a593Smuzhiyun 	ioc->stats.hb_count = ioc->hb_count;
3132*4882a593Smuzhiyun 	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
3133*4882a593Smuzhiyun }
3134*4882a593Smuzhiyun 
3135*4882a593Smuzhiyun /*
3136*4882a593Smuzhiyun  *  BFA IOC PF private functions
3137*4882a593Smuzhiyun  */
3138*4882a593Smuzhiyun static void
bfa_iocpf_timeout(void * ioc_arg)3139*4882a593Smuzhiyun bfa_iocpf_timeout(void *ioc_arg)
3140*4882a593Smuzhiyun {
3141*4882a593Smuzhiyun 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
3142*4882a593Smuzhiyun 
3143*4882a593Smuzhiyun 	bfa_trc(ioc, 0);
3144*4882a593Smuzhiyun 	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
3145*4882a593Smuzhiyun }
3146*4882a593Smuzhiyun 
3147*4882a593Smuzhiyun static void
bfa_iocpf_sem_timeout(void * ioc_arg)3148*4882a593Smuzhiyun bfa_iocpf_sem_timeout(void *ioc_arg)
3149*4882a593Smuzhiyun {
3150*4882a593Smuzhiyun 	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;
3151*4882a593Smuzhiyun 
3152*4882a593Smuzhiyun 	bfa_ioc_hw_sem_get(ioc);
3153*4882a593Smuzhiyun }
3154*4882a593Smuzhiyun 
3155*4882a593Smuzhiyun static void
bfa_ioc_poll_fwinit(struct bfa_ioc_s * ioc)3156*4882a593Smuzhiyun bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
3157*4882a593Smuzhiyun {
3158*4882a593Smuzhiyun 	u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
3159*4882a593Smuzhiyun 
3160*4882a593Smuzhiyun 	bfa_trc(ioc, fwstate);
3161*4882a593Smuzhiyun 
3162*4882a593Smuzhiyun 	if (fwstate == BFI_IOC_DISABLED) {
3163*4882a593Smuzhiyun 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
3164*4882a593Smuzhiyun 		return;
3165*4882a593Smuzhiyun 	}
3166*4882a593Smuzhiyun 
3167*4882a593Smuzhiyun 	if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV))
3168*4882a593Smuzhiyun 		bfa_iocpf_timeout(ioc);
3169*4882a593Smuzhiyun 	else {
3170*4882a593Smuzhiyun 		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
3171*4882a593Smuzhiyun 		bfa_iocpf_poll_timer_start(ioc);
3172*4882a593Smuzhiyun 	}
3173*4882a593Smuzhiyun }
3174*4882a593Smuzhiyun 
3175*4882a593Smuzhiyun static void
bfa_iocpf_poll_timeout(void * ioc_arg)3176*4882a593Smuzhiyun bfa_iocpf_poll_timeout(void *ioc_arg)
3177*4882a593Smuzhiyun {
3178*4882a593Smuzhiyun 	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
3179*4882a593Smuzhiyun 
3180*4882a593Smuzhiyun 	bfa_ioc_poll_fwinit(ioc);
3181*4882a593Smuzhiyun }
3182*4882a593Smuzhiyun 
3183*4882a593Smuzhiyun /*
3184*4882a593Smuzhiyun  *  bfa timer function
3185*4882a593Smuzhiyun  */
3186*4882a593Smuzhiyun void
bfa_timer_beat(struct bfa_timer_mod_s * mod)3187*4882a593Smuzhiyun bfa_timer_beat(struct bfa_timer_mod_s *mod)
3188*4882a593Smuzhiyun {
3189*4882a593Smuzhiyun 	struct list_head *qh = &mod->timer_q;
3190*4882a593Smuzhiyun 	struct list_head *qe, *qe_next;
3191*4882a593Smuzhiyun 	struct bfa_timer_s *elem;
3192*4882a593Smuzhiyun 	struct list_head timedout_q;
3193*4882a593Smuzhiyun 
3194*4882a593Smuzhiyun 	INIT_LIST_HEAD(&timedout_q);
3195*4882a593Smuzhiyun 
3196*4882a593Smuzhiyun 	qe = bfa_q_next(qh);
3197*4882a593Smuzhiyun 
3198*4882a593Smuzhiyun 	while (qe != qh) {
3199*4882a593Smuzhiyun 		qe_next = bfa_q_next(qe);
3200*4882a593Smuzhiyun 
3201*4882a593Smuzhiyun 		elem = (struct bfa_timer_s *) qe;
3202*4882a593Smuzhiyun 		if (elem->timeout <= BFA_TIMER_FREQ) {
3203*4882a593Smuzhiyun 			elem->timeout = 0;
3204*4882a593Smuzhiyun 			list_del(&elem->qe);
3205*4882a593Smuzhiyun 			list_add_tail(&elem->qe, &timedout_q);
3206*4882a593Smuzhiyun 		} else {
3207*4882a593Smuzhiyun 			elem->timeout -= BFA_TIMER_FREQ;
3208*4882a593Smuzhiyun 		}
3209*4882a593Smuzhiyun 
3210*4882a593Smuzhiyun 		qe = qe_next;	/* go to next elem */
3211*4882a593Smuzhiyun 	}
3212*4882a593Smuzhiyun 
3213*4882a593Smuzhiyun 	/*
3214*4882a593Smuzhiyun 	 * Pop all the timeout entries
3215*4882a593Smuzhiyun 	 */
3216*4882a593Smuzhiyun 	while (!list_empty(&timedout_q)) {
3217*4882a593Smuzhiyun 		bfa_q_deq(&timedout_q, &elem);
3218*4882a593Smuzhiyun 		elem->timercb(elem->arg);
3219*4882a593Smuzhiyun 	}
3220*4882a593Smuzhiyun }
3221*4882a593Smuzhiyun 
3222*4882a593Smuzhiyun /*
3223*4882a593Smuzhiyun  * Should be called with lock protection
3224*4882a593Smuzhiyun  */
3225*4882a593Smuzhiyun void
bfa_timer_begin(struct bfa_timer_mod_s * mod,struct bfa_timer_s * timer,void (* timercb)(void *),void * arg,unsigned int timeout)3226*4882a593Smuzhiyun bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
3227*4882a593Smuzhiyun 		    void (*timercb) (void *), void *arg, unsigned int timeout)
3228*4882a593Smuzhiyun {
3229*4882a593Smuzhiyun 
3230*4882a593Smuzhiyun 	WARN_ON(timercb == NULL);
3231*4882a593Smuzhiyun 	WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
3232*4882a593Smuzhiyun 
3233*4882a593Smuzhiyun 	timer->timeout = timeout;
3234*4882a593Smuzhiyun 	timer->timercb = timercb;
3235*4882a593Smuzhiyun 	timer->arg = arg;
3236*4882a593Smuzhiyun 
3237*4882a593Smuzhiyun 	list_add_tail(&timer->qe, &mod->timer_q);
3238*4882a593Smuzhiyun }
3239*4882a593Smuzhiyun 
3240*4882a593Smuzhiyun /*
3241*4882a593Smuzhiyun  * Should be called with lock protection
3242*4882a593Smuzhiyun  */
3243*4882a593Smuzhiyun void
bfa_timer_stop(struct bfa_timer_s * timer)3244*4882a593Smuzhiyun bfa_timer_stop(struct bfa_timer_s *timer)
3245*4882a593Smuzhiyun {
3246*4882a593Smuzhiyun 	WARN_ON(list_empty(&timer->qe));
3247*4882a593Smuzhiyun 
3248*4882a593Smuzhiyun 	list_del(&timer->qe);
3249*4882a593Smuzhiyun }
3250*4882a593Smuzhiyun 
3251*4882a593Smuzhiyun /*
3252*4882a593Smuzhiyun  *	ASIC block related
3253*4882a593Smuzhiyun  */
3254*4882a593Smuzhiyun static void
bfa_ablk_config_swap(struct bfa_ablk_cfg_s * cfg)3255*4882a593Smuzhiyun bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3256*4882a593Smuzhiyun {
3257*4882a593Smuzhiyun 	struct bfa_ablk_cfg_inst_s *cfg_inst;
3258*4882a593Smuzhiyun 	int i, j;
3259*4882a593Smuzhiyun 	u16	be16;
3260*4882a593Smuzhiyun 
3261*4882a593Smuzhiyun 	for (i = 0; i < BFA_ABLK_MAX; i++) {
3262*4882a593Smuzhiyun 		cfg_inst = &cfg->inst[i];
3263*4882a593Smuzhiyun 		for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3264*4882a593Smuzhiyun 			be16 = cfg_inst->pf_cfg[j].pers;
3265*4882a593Smuzhiyun 			cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3266*4882a593Smuzhiyun 			be16 = cfg_inst->pf_cfg[j].num_qpairs;
3267*4882a593Smuzhiyun 			cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3268*4882a593Smuzhiyun 			be16 = cfg_inst->pf_cfg[j].num_vectors;
3269*4882a593Smuzhiyun 			cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3270*4882a593Smuzhiyun 			be16 = cfg_inst->pf_cfg[j].bw_min;
3271*4882a593Smuzhiyun 			cfg_inst->pf_cfg[j].bw_min = be16_to_cpu(be16);
3272*4882a593Smuzhiyun 			be16 = cfg_inst->pf_cfg[j].bw_max;
3273*4882a593Smuzhiyun 			cfg_inst->pf_cfg[j].bw_max = be16_to_cpu(be16);
3274*4882a593Smuzhiyun 		}
3275*4882a593Smuzhiyun 	}
3276*4882a593Smuzhiyun }
3277*4882a593Smuzhiyun 
3278*4882a593Smuzhiyun static void
bfa_ablk_isr(void * cbarg,struct bfi_mbmsg_s * msg)3279*4882a593Smuzhiyun bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3280*4882a593Smuzhiyun {
3281*4882a593Smuzhiyun 	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3282*4882a593Smuzhiyun 	struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3283*4882a593Smuzhiyun 	bfa_ablk_cbfn_t cbfn;
3284*4882a593Smuzhiyun 
3285*4882a593Smuzhiyun 	WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3286*4882a593Smuzhiyun 	bfa_trc(ablk->ioc, msg->mh.msg_id);
3287*4882a593Smuzhiyun 
3288*4882a593Smuzhiyun 	switch (msg->mh.msg_id) {
3289*4882a593Smuzhiyun 	case BFI_ABLK_I2H_QUERY:
3290*4882a593Smuzhiyun 		if (rsp->status == BFA_STATUS_OK) {
3291*4882a593Smuzhiyun 			memcpy(ablk->cfg, ablk->dma_addr.kva,
3292*4882a593Smuzhiyun 				sizeof(struct bfa_ablk_cfg_s));
3293*4882a593Smuzhiyun 			bfa_ablk_config_swap(ablk->cfg);
3294*4882a593Smuzhiyun 			ablk->cfg = NULL;
3295*4882a593Smuzhiyun 		}
3296*4882a593Smuzhiyun 		break;
3297*4882a593Smuzhiyun 
3298*4882a593Smuzhiyun 	case BFI_ABLK_I2H_ADPT_CONFIG:
3299*4882a593Smuzhiyun 	case BFI_ABLK_I2H_PORT_CONFIG:
3300*4882a593Smuzhiyun 		/* update config port mode */
3301*4882a593Smuzhiyun 		ablk->ioc->port_mode_cfg = rsp->port_mode;
3302*4882a593Smuzhiyun 
3303*4882a593Smuzhiyun 	case BFI_ABLK_I2H_PF_DELETE:
3304*4882a593Smuzhiyun 	case BFI_ABLK_I2H_PF_UPDATE:
3305*4882a593Smuzhiyun 	case BFI_ABLK_I2H_OPTROM_ENABLE:
3306*4882a593Smuzhiyun 	case BFI_ABLK_I2H_OPTROM_DISABLE:
3307*4882a593Smuzhiyun 		/* No-op */
3308*4882a593Smuzhiyun 		break;
3309*4882a593Smuzhiyun 
3310*4882a593Smuzhiyun 	case BFI_ABLK_I2H_PF_CREATE:
3311*4882a593Smuzhiyun 		*(ablk->pcifn) = rsp->pcifn;
3312*4882a593Smuzhiyun 		ablk->pcifn = NULL;
3313*4882a593Smuzhiyun 		break;
3314*4882a593Smuzhiyun 
3315*4882a593Smuzhiyun 	default:
3316*4882a593Smuzhiyun 		WARN_ON(1);
3317*4882a593Smuzhiyun 	}
3318*4882a593Smuzhiyun 
3319*4882a593Smuzhiyun 	ablk->busy = BFA_FALSE;
3320*4882a593Smuzhiyun 	if (ablk->cbfn) {
3321*4882a593Smuzhiyun 		cbfn = ablk->cbfn;
3322*4882a593Smuzhiyun 		ablk->cbfn = NULL;
3323*4882a593Smuzhiyun 		cbfn(ablk->cbarg, rsp->status);
3324*4882a593Smuzhiyun 	}
3325*4882a593Smuzhiyun }
3326*4882a593Smuzhiyun 
3327*4882a593Smuzhiyun static void
bfa_ablk_notify(void * cbarg,enum bfa_ioc_event_e event)3328*4882a593Smuzhiyun bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3329*4882a593Smuzhiyun {
3330*4882a593Smuzhiyun 	struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3331*4882a593Smuzhiyun 
3332*4882a593Smuzhiyun 	bfa_trc(ablk->ioc, event);
3333*4882a593Smuzhiyun 
3334*4882a593Smuzhiyun 	switch (event) {
3335*4882a593Smuzhiyun 	case BFA_IOC_E_ENABLED:
3336*4882a593Smuzhiyun 		WARN_ON(ablk->busy != BFA_FALSE);
3337*4882a593Smuzhiyun 		break;
3338*4882a593Smuzhiyun 
3339*4882a593Smuzhiyun 	case BFA_IOC_E_DISABLED:
3340*4882a593Smuzhiyun 	case BFA_IOC_E_FAILED:
3341*4882a593Smuzhiyun 		/* Fail any pending requests */
3342*4882a593Smuzhiyun 		ablk->pcifn = NULL;
3343*4882a593Smuzhiyun 		if (ablk->busy) {
3344*4882a593Smuzhiyun 			if (ablk->cbfn)
3345*4882a593Smuzhiyun 				ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3346*4882a593Smuzhiyun 			ablk->cbfn = NULL;
3347*4882a593Smuzhiyun 			ablk->busy = BFA_FALSE;
3348*4882a593Smuzhiyun 		}
3349*4882a593Smuzhiyun 		break;
3350*4882a593Smuzhiyun 
3351*4882a593Smuzhiyun 	default:
3352*4882a593Smuzhiyun 		WARN_ON(1);
3353*4882a593Smuzhiyun 		break;
3354*4882a593Smuzhiyun 	}
3355*4882a593Smuzhiyun }
3356*4882a593Smuzhiyun 
3357*4882a593Smuzhiyun u32
bfa_ablk_meminfo(void)3358*4882a593Smuzhiyun bfa_ablk_meminfo(void)
3359*4882a593Smuzhiyun {
3360*4882a593Smuzhiyun 	return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3361*4882a593Smuzhiyun }
3362*4882a593Smuzhiyun 
3363*4882a593Smuzhiyun void
bfa_ablk_memclaim(struct bfa_ablk_s * ablk,u8 * dma_kva,u64 dma_pa)3364*4882a593Smuzhiyun bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3365*4882a593Smuzhiyun {
3366*4882a593Smuzhiyun 	ablk->dma_addr.kva = dma_kva;
3367*4882a593Smuzhiyun 	ablk->dma_addr.pa  = dma_pa;
3368*4882a593Smuzhiyun }
3369*4882a593Smuzhiyun 
3370*4882a593Smuzhiyun void
bfa_ablk_attach(struct bfa_ablk_s * ablk,struct bfa_ioc_s * ioc)3371*4882a593Smuzhiyun bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3372*4882a593Smuzhiyun {
3373*4882a593Smuzhiyun 	ablk->ioc = ioc;
3374*4882a593Smuzhiyun 
3375*4882a593Smuzhiyun 	bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
3376*4882a593Smuzhiyun 	bfa_q_qe_init(&ablk->ioc_notify);
3377*4882a593Smuzhiyun 	bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3378*4882a593Smuzhiyun 	list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3379*4882a593Smuzhiyun }
3380*4882a593Smuzhiyun 
3381*4882a593Smuzhiyun bfa_status_t
bfa_ablk_query(struct bfa_ablk_s * ablk,struct bfa_ablk_cfg_s * ablk_cfg,bfa_ablk_cbfn_t cbfn,void * cbarg)3382*4882a593Smuzhiyun bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3383*4882a593Smuzhiyun 		bfa_ablk_cbfn_t cbfn, void *cbarg)
3384*4882a593Smuzhiyun {
3385*4882a593Smuzhiyun 	struct bfi_ablk_h2i_query_s *m;
3386*4882a593Smuzhiyun 
3387*4882a593Smuzhiyun 	WARN_ON(!ablk_cfg);
3388*4882a593Smuzhiyun 
3389*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3390*4882a593Smuzhiyun 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3391*4882a593Smuzhiyun 		return BFA_STATUS_IOC_FAILURE;
3392*4882a593Smuzhiyun 	}
3393*4882a593Smuzhiyun 
3394*4882a593Smuzhiyun 	if (ablk->busy) {
3395*4882a593Smuzhiyun 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3396*4882a593Smuzhiyun 		return  BFA_STATUS_DEVBUSY;
3397*4882a593Smuzhiyun 	}
3398*4882a593Smuzhiyun 
3399*4882a593Smuzhiyun 	ablk->cfg = ablk_cfg;
3400*4882a593Smuzhiyun 	ablk->cbfn  = cbfn;
3401*4882a593Smuzhiyun 	ablk->cbarg = cbarg;
3402*4882a593Smuzhiyun 	ablk->busy  = BFA_TRUE;
3403*4882a593Smuzhiyun 
3404*4882a593Smuzhiyun 	m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3405*4882a593Smuzhiyun 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3406*4882a593Smuzhiyun 		    bfa_ioc_portid(ablk->ioc));
3407*4882a593Smuzhiyun 	bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3408*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3409*4882a593Smuzhiyun 
3410*4882a593Smuzhiyun 	return BFA_STATUS_OK;
3411*4882a593Smuzhiyun }
3412*4882a593Smuzhiyun 
3413*4882a593Smuzhiyun bfa_status_t
bfa_ablk_pf_create(struct bfa_ablk_s * ablk,u16 * pcifn,u8 port,enum bfi_pcifn_class personality,u16 bw_min,u16 bw_max,bfa_ablk_cbfn_t cbfn,void * cbarg)3414*4882a593Smuzhiyun bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3415*4882a593Smuzhiyun 		u8 port, enum bfi_pcifn_class personality,
3416*4882a593Smuzhiyun 		u16 bw_min, u16 bw_max,
3417*4882a593Smuzhiyun 		bfa_ablk_cbfn_t cbfn, void *cbarg)
3418*4882a593Smuzhiyun {
3419*4882a593Smuzhiyun 	struct bfi_ablk_h2i_pf_req_s *m;
3420*4882a593Smuzhiyun 
3421*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3422*4882a593Smuzhiyun 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3423*4882a593Smuzhiyun 		return BFA_STATUS_IOC_FAILURE;
3424*4882a593Smuzhiyun 	}
3425*4882a593Smuzhiyun 
3426*4882a593Smuzhiyun 	if (ablk->busy) {
3427*4882a593Smuzhiyun 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3428*4882a593Smuzhiyun 		return  BFA_STATUS_DEVBUSY;
3429*4882a593Smuzhiyun 	}
3430*4882a593Smuzhiyun 
3431*4882a593Smuzhiyun 	ablk->pcifn = pcifn;
3432*4882a593Smuzhiyun 	ablk->cbfn = cbfn;
3433*4882a593Smuzhiyun 	ablk->cbarg = cbarg;
3434*4882a593Smuzhiyun 	ablk->busy  = BFA_TRUE;
3435*4882a593Smuzhiyun 
3436*4882a593Smuzhiyun 	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3437*4882a593Smuzhiyun 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3438*4882a593Smuzhiyun 		    bfa_ioc_portid(ablk->ioc));
3439*4882a593Smuzhiyun 	m->pers = cpu_to_be16((u16)personality);
3440*4882a593Smuzhiyun 	m->bw_min = cpu_to_be16(bw_min);
3441*4882a593Smuzhiyun 	m->bw_max = cpu_to_be16(bw_max);
3442*4882a593Smuzhiyun 	m->port = port;
3443*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3444*4882a593Smuzhiyun 
3445*4882a593Smuzhiyun 	return BFA_STATUS_OK;
3446*4882a593Smuzhiyun }
3447*4882a593Smuzhiyun 
3448*4882a593Smuzhiyun bfa_status_t
bfa_ablk_pf_delete(struct bfa_ablk_s * ablk,int pcifn,bfa_ablk_cbfn_t cbfn,void * cbarg)3449*4882a593Smuzhiyun bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3450*4882a593Smuzhiyun 		bfa_ablk_cbfn_t cbfn, void *cbarg)
3451*4882a593Smuzhiyun {
3452*4882a593Smuzhiyun 	struct bfi_ablk_h2i_pf_req_s *m;
3453*4882a593Smuzhiyun 
3454*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3455*4882a593Smuzhiyun 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3456*4882a593Smuzhiyun 		return BFA_STATUS_IOC_FAILURE;
3457*4882a593Smuzhiyun 	}
3458*4882a593Smuzhiyun 
3459*4882a593Smuzhiyun 	if (ablk->busy) {
3460*4882a593Smuzhiyun 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3461*4882a593Smuzhiyun 		return  BFA_STATUS_DEVBUSY;
3462*4882a593Smuzhiyun 	}
3463*4882a593Smuzhiyun 
3464*4882a593Smuzhiyun 	ablk->cbfn  = cbfn;
3465*4882a593Smuzhiyun 	ablk->cbarg = cbarg;
3466*4882a593Smuzhiyun 	ablk->busy  = BFA_TRUE;
3467*4882a593Smuzhiyun 
3468*4882a593Smuzhiyun 	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3469*4882a593Smuzhiyun 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3470*4882a593Smuzhiyun 		    bfa_ioc_portid(ablk->ioc));
3471*4882a593Smuzhiyun 	m->pcifn = (u8)pcifn;
3472*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3473*4882a593Smuzhiyun 
3474*4882a593Smuzhiyun 	return BFA_STATUS_OK;
3475*4882a593Smuzhiyun }
3476*4882a593Smuzhiyun 
3477*4882a593Smuzhiyun bfa_status_t
bfa_ablk_adapter_config(struct bfa_ablk_s * ablk,enum bfa_mode_s mode,int max_pf,int max_vf,bfa_ablk_cbfn_t cbfn,void * cbarg)3478*4882a593Smuzhiyun bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3479*4882a593Smuzhiyun 		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3480*4882a593Smuzhiyun {
3481*4882a593Smuzhiyun 	struct bfi_ablk_h2i_cfg_req_s *m;
3482*4882a593Smuzhiyun 
3483*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3484*4882a593Smuzhiyun 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3485*4882a593Smuzhiyun 		return BFA_STATUS_IOC_FAILURE;
3486*4882a593Smuzhiyun 	}
3487*4882a593Smuzhiyun 
3488*4882a593Smuzhiyun 	if (ablk->busy) {
3489*4882a593Smuzhiyun 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3490*4882a593Smuzhiyun 		return  BFA_STATUS_DEVBUSY;
3491*4882a593Smuzhiyun 	}
3492*4882a593Smuzhiyun 
3493*4882a593Smuzhiyun 	ablk->cbfn  = cbfn;
3494*4882a593Smuzhiyun 	ablk->cbarg = cbarg;
3495*4882a593Smuzhiyun 	ablk->busy  = BFA_TRUE;
3496*4882a593Smuzhiyun 
3497*4882a593Smuzhiyun 	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3498*4882a593Smuzhiyun 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3499*4882a593Smuzhiyun 		    bfa_ioc_portid(ablk->ioc));
3500*4882a593Smuzhiyun 	m->mode = (u8)mode;
3501*4882a593Smuzhiyun 	m->max_pf = (u8)max_pf;
3502*4882a593Smuzhiyun 	m->max_vf = (u8)max_vf;
3503*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3504*4882a593Smuzhiyun 
3505*4882a593Smuzhiyun 	return BFA_STATUS_OK;
3506*4882a593Smuzhiyun }
3507*4882a593Smuzhiyun 
3508*4882a593Smuzhiyun bfa_status_t
bfa_ablk_port_config(struct bfa_ablk_s * ablk,int port,enum bfa_mode_s mode,int max_pf,int max_vf,bfa_ablk_cbfn_t cbfn,void * cbarg)3509*4882a593Smuzhiyun bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3510*4882a593Smuzhiyun 		int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3511*4882a593Smuzhiyun {
3512*4882a593Smuzhiyun 	struct bfi_ablk_h2i_cfg_req_s *m;
3513*4882a593Smuzhiyun 
3514*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3515*4882a593Smuzhiyun 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3516*4882a593Smuzhiyun 		return BFA_STATUS_IOC_FAILURE;
3517*4882a593Smuzhiyun 	}
3518*4882a593Smuzhiyun 
3519*4882a593Smuzhiyun 	if (ablk->busy) {
3520*4882a593Smuzhiyun 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3521*4882a593Smuzhiyun 		return  BFA_STATUS_DEVBUSY;
3522*4882a593Smuzhiyun 	}
3523*4882a593Smuzhiyun 
3524*4882a593Smuzhiyun 	ablk->cbfn  = cbfn;
3525*4882a593Smuzhiyun 	ablk->cbarg = cbarg;
3526*4882a593Smuzhiyun 	ablk->busy  = BFA_TRUE;
3527*4882a593Smuzhiyun 
3528*4882a593Smuzhiyun 	m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3529*4882a593Smuzhiyun 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3530*4882a593Smuzhiyun 		bfa_ioc_portid(ablk->ioc));
3531*4882a593Smuzhiyun 	m->port = (u8)port;
3532*4882a593Smuzhiyun 	m->mode = (u8)mode;
3533*4882a593Smuzhiyun 	m->max_pf = (u8)max_pf;
3534*4882a593Smuzhiyun 	m->max_vf = (u8)max_vf;
3535*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3536*4882a593Smuzhiyun 
3537*4882a593Smuzhiyun 	return BFA_STATUS_OK;
3538*4882a593Smuzhiyun }
3539*4882a593Smuzhiyun 
3540*4882a593Smuzhiyun bfa_status_t
bfa_ablk_pf_update(struct bfa_ablk_s * ablk,int pcifn,u16 bw_min,u16 bw_max,bfa_ablk_cbfn_t cbfn,void * cbarg)3541*4882a593Smuzhiyun bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, u16 bw_min,
3542*4882a593Smuzhiyun 		   u16 bw_max, bfa_ablk_cbfn_t cbfn, void *cbarg)
3543*4882a593Smuzhiyun {
3544*4882a593Smuzhiyun 	struct bfi_ablk_h2i_pf_req_s *m;
3545*4882a593Smuzhiyun 
3546*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3547*4882a593Smuzhiyun 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3548*4882a593Smuzhiyun 		return BFA_STATUS_IOC_FAILURE;
3549*4882a593Smuzhiyun 	}
3550*4882a593Smuzhiyun 
3551*4882a593Smuzhiyun 	if (ablk->busy) {
3552*4882a593Smuzhiyun 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3553*4882a593Smuzhiyun 		return  BFA_STATUS_DEVBUSY;
3554*4882a593Smuzhiyun 	}
3555*4882a593Smuzhiyun 
3556*4882a593Smuzhiyun 	ablk->cbfn  = cbfn;
3557*4882a593Smuzhiyun 	ablk->cbarg = cbarg;
3558*4882a593Smuzhiyun 	ablk->busy  = BFA_TRUE;
3559*4882a593Smuzhiyun 
3560*4882a593Smuzhiyun 	m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3561*4882a593Smuzhiyun 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3562*4882a593Smuzhiyun 		bfa_ioc_portid(ablk->ioc));
3563*4882a593Smuzhiyun 	m->pcifn = (u8)pcifn;
3564*4882a593Smuzhiyun 	m->bw_min = cpu_to_be16(bw_min);
3565*4882a593Smuzhiyun 	m->bw_max = cpu_to_be16(bw_max);
3566*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3567*4882a593Smuzhiyun 
3568*4882a593Smuzhiyun 	return BFA_STATUS_OK;
3569*4882a593Smuzhiyun }
3570*4882a593Smuzhiyun 
3571*4882a593Smuzhiyun bfa_status_t
bfa_ablk_optrom_en(struct bfa_ablk_s * ablk,bfa_ablk_cbfn_t cbfn,void * cbarg)3572*4882a593Smuzhiyun bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3573*4882a593Smuzhiyun {
3574*4882a593Smuzhiyun 	struct bfi_ablk_h2i_optrom_s *m;
3575*4882a593Smuzhiyun 
3576*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3577*4882a593Smuzhiyun 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3578*4882a593Smuzhiyun 		return BFA_STATUS_IOC_FAILURE;
3579*4882a593Smuzhiyun 	}
3580*4882a593Smuzhiyun 
3581*4882a593Smuzhiyun 	if (ablk->busy) {
3582*4882a593Smuzhiyun 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3583*4882a593Smuzhiyun 		return  BFA_STATUS_DEVBUSY;
3584*4882a593Smuzhiyun 	}
3585*4882a593Smuzhiyun 
3586*4882a593Smuzhiyun 	ablk->cbfn  = cbfn;
3587*4882a593Smuzhiyun 	ablk->cbarg = cbarg;
3588*4882a593Smuzhiyun 	ablk->busy  = BFA_TRUE;
3589*4882a593Smuzhiyun 
3590*4882a593Smuzhiyun 	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3591*4882a593Smuzhiyun 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3592*4882a593Smuzhiyun 		bfa_ioc_portid(ablk->ioc));
3593*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3594*4882a593Smuzhiyun 
3595*4882a593Smuzhiyun 	return BFA_STATUS_OK;
3596*4882a593Smuzhiyun }
3597*4882a593Smuzhiyun 
3598*4882a593Smuzhiyun bfa_status_t
bfa_ablk_optrom_dis(struct bfa_ablk_s * ablk,bfa_ablk_cbfn_t cbfn,void * cbarg)3599*4882a593Smuzhiyun bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3600*4882a593Smuzhiyun {
3601*4882a593Smuzhiyun 	struct bfi_ablk_h2i_optrom_s *m;
3602*4882a593Smuzhiyun 
3603*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(ablk->ioc)) {
3604*4882a593Smuzhiyun 		bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3605*4882a593Smuzhiyun 		return BFA_STATUS_IOC_FAILURE;
3606*4882a593Smuzhiyun 	}
3607*4882a593Smuzhiyun 
3608*4882a593Smuzhiyun 	if (ablk->busy) {
3609*4882a593Smuzhiyun 		bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3610*4882a593Smuzhiyun 		return  BFA_STATUS_DEVBUSY;
3611*4882a593Smuzhiyun 	}
3612*4882a593Smuzhiyun 
3613*4882a593Smuzhiyun 	ablk->cbfn  = cbfn;
3614*4882a593Smuzhiyun 	ablk->cbarg = cbarg;
3615*4882a593Smuzhiyun 	ablk->busy  = BFA_TRUE;
3616*4882a593Smuzhiyun 
3617*4882a593Smuzhiyun 	m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3618*4882a593Smuzhiyun 	bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3619*4882a593Smuzhiyun 		bfa_ioc_portid(ablk->ioc));
3620*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3621*4882a593Smuzhiyun 
3622*4882a593Smuzhiyun 	return BFA_STATUS_OK;
3623*4882a593Smuzhiyun }
3624*4882a593Smuzhiyun 
3625*4882a593Smuzhiyun /*
3626*4882a593Smuzhiyun  *	SFP module specific
3627*4882a593Smuzhiyun  */
3628*4882a593Smuzhiyun 
3629*4882a593Smuzhiyun /* forward declarations */
3630*4882a593Smuzhiyun static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3631*4882a593Smuzhiyun static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3632*4882a593Smuzhiyun static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3633*4882a593Smuzhiyun 				enum bfa_port_speed portspeed);
3634*4882a593Smuzhiyun 
3635*4882a593Smuzhiyun static void
bfa_cb_sfp_show(struct bfa_sfp_s * sfp)3636*4882a593Smuzhiyun bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3637*4882a593Smuzhiyun {
3638*4882a593Smuzhiyun 	bfa_trc(sfp, sfp->lock);
3639*4882a593Smuzhiyun 	if (sfp->cbfn)
3640*4882a593Smuzhiyun 		sfp->cbfn(sfp->cbarg, sfp->status);
3641*4882a593Smuzhiyun 	sfp->lock = 0;
3642*4882a593Smuzhiyun 	sfp->cbfn = NULL;
3643*4882a593Smuzhiyun }
3644*4882a593Smuzhiyun 
3645*4882a593Smuzhiyun static void
bfa_cb_sfp_state_query(struct bfa_sfp_s * sfp)3646*4882a593Smuzhiyun bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3647*4882a593Smuzhiyun {
3648*4882a593Smuzhiyun 	bfa_trc(sfp, sfp->portspeed);
3649*4882a593Smuzhiyun 	if (sfp->media) {
3650*4882a593Smuzhiyun 		bfa_sfp_media_get(sfp);
3651*4882a593Smuzhiyun 		if (sfp->state_query_cbfn)
3652*4882a593Smuzhiyun 			sfp->state_query_cbfn(sfp->state_query_cbarg,
3653*4882a593Smuzhiyun 					sfp->status);
3654*4882a593Smuzhiyun 		sfp->media = NULL;
3655*4882a593Smuzhiyun 	}
3656*4882a593Smuzhiyun 
3657*4882a593Smuzhiyun 	if (sfp->portspeed) {
3658*4882a593Smuzhiyun 		sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3659*4882a593Smuzhiyun 		if (sfp->state_query_cbfn)
3660*4882a593Smuzhiyun 			sfp->state_query_cbfn(sfp->state_query_cbarg,
3661*4882a593Smuzhiyun 					sfp->status);
3662*4882a593Smuzhiyun 		sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3663*4882a593Smuzhiyun 	}
3664*4882a593Smuzhiyun 
3665*4882a593Smuzhiyun 	sfp->state_query_lock = 0;
3666*4882a593Smuzhiyun 	sfp->state_query_cbfn = NULL;
3667*4882a593Smuzhiyun }
3668*4882a593Smuzhiyun 
3669*4882a593Smuzhiyun /*
3670*4882a593Smuzhiyun  *	IOC event handler.
3671*4882a593Smuzhiyun  */
3672*4882a593Smuzhiyun static void
bfa_sfp_notify(void * sfp_arg,enum bfa_ioc_event_e event)3673*4882a593Smuzhiyun bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3674*4882a593Smuzhiyun {
3675*4882a593Smuzhiyun 	struct bfa_sfp_s *sfp = sfp_arg;
3676*4882a593Smuzhiyun 
3677*4882a593Smuzhiyun 	bfa_trc(sfp, event);
3678*4882a593Smuzhiyun 	bfa_trc(sfp, sfp->lock);
3679*4882a593Smuzhiyun 	bfa_trc(sfp, sfp->state_query_lock);
3680*4882a593Smuzhiyun 
3681*4882a593Smuzhiyun 	switch (event) {
3682*4882a593Smuzhiyun 	case BFA_IOC_E_DISABLED:
3683*4882a593Smuzhiyun 	case BFA_IOC_E_FAILED:
3684*4882a593Smuzhiyun 		if (sfp->lock) {
3685*4882a593Smuzhiyun 			sfp->status = BFA_STATUS_IOC_FAILURE;
3686*4882a593Smuzhiyun 			bfa_cb_sfp_show(sfp);
3687*4882a593Smuzhiyun 		}
3688*4882a593Smuzhiyun 
3689*4882a593Smuzhiyun 		if (sfp->state_query_lock) {
3690*4882a593Smuzhiyun 			sfp->status = BFA_STATUS_IOC_FAILURE;
3691*4882a593Smuzhiyun 			bfa_cb_sfp_state_query(sfp);
3692*4882a593Smuzhiyun 		}
3693*4882a593Smuzhiyun 		break;
3694*4882a593Smuzhiyun 
3695*4882a593Smuzhiyun 	default:
3696*4882a593Smuzhiyun 		break;
3697*4882a593Smuzhiyun 	}
3698*4882a593Smuzhiyun }
3699*4882a593Smuzhiyun 
3700*4882a593Smuzhiyun /*
3701*4882a593Smuzhiyun  * SFP's State Change Notification post to AEN
3702*4882a593Smuzhiyun  */
3703*4882a593Smuzhiyun static void
bfa_sfp_scn_aen_post(struct bfa_sfp_s * sfp,struct bfi_sfp_scn_s * rsp)3704*4882a593Smuzhiyun bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3705*4882a593Smuzhiyun {
3706*4882a593Smuzhiyun 	struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3707*4882a593Smuzhiyun 	struct bfa_aen_entry_s  *aen_entry;
3708*4882a593Smuzhiyun 	enum bfa_port_aen_event aen_evt = 0;
3709*4882a593Smuzhiyun 
3710*4882a593Smuzhiyun 	bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3711*4882a593Smuzhiyun 		      ((u64)rsp->event));
3712*4882a593Smuzhiyun 
3713*4882a593Smuzhiyun 	bfad_get_aen_entry(bfad, aen_entry);
3714*4882a593Smuzhiyun 	if (!aen_entry)
3715*4882a593Smuzhiyun 		return;
3716*4882a593Smuzhiyun 
3717*4882a593Smuzhiyun 	aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3718*4882a593Smuzhiyun 	aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3719*4882a593Smuzhiyun 	aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3720*4882a593Smuzhiyun 
3721*4882a593Smuzhiyun 	switch (rsp->event) {
3722*4882a593Smuzhiyun 	case BFA_SFP_SCN_INSERTED:
3723*4882a593Smuzhiyun 		aen_evt = BFA_PORT_AEN_SFP_INSERT;
3724*4882a593Smuzhiyun 		break;
3725*4882a593Smuzhiyun 	case BFA_SFP_SCN_REMOVED:
3726*4882a593Smuzhiyun 		aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3727*4882a593Smuzhiyun 		break;
3728*4882a593Smuzhiyun 	case BFA_SFP_SCN_FAILED:
3729*4882a593Smuzhiyun 		aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3730*4882a593Smuzhiyun 		break;
3731*4882a593Smuzhiyun 	case BFA_SFP_SCN_UNSUPPORT:
3732*4882a593Smuzhiyun 		aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3733*4882a593Smuzhiyun 		break;
3734*4882a593Smuzhiyun 	case BFA_SFP_SCN_POM:
3735*4882a593Smuzhiyun 		aen_evt = BFA_PORT_AEN_SFP_POM;
3736*4882a593Smuzhiyun 		aen_entry->aen_data.port.level = rsp->pomlvl;
3737*4882a593Smuzhiyun 		break;
3738*4882a593Smuzhiyun 	default:
3739*4882a593Smuzhiyun 		bfa_trc(sfp, rsp->event);
3740*4882a593Smuzhiyun 		WARN_ON(1);
3741*4882a593Smuzhiyun 	}
3742*4882a593Smuzhiyun 
3743*4882a593Smuzhiyun 	/* Send the AEN notification */
3744*4882a593Smuzhiyun 	bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3745*4882a593Smuzhiyun 				  BFA_AEN_CAT_PORT, aen_evt);
3746*4882a593Smuzhiyun }
3747*4882a593Smuzhiyun 
3748*4882a593Smuzhiyun /*
3749*4882a593Smuzhiyun  *	SFP get data send
3750*4882a593Smuzhiyun  */
3751*4882a593Smuzhiyun static void
bfa_sfp_getdata_send(struct bfa_sfp_s * sfp)3752*4882a593Smuzhiyun bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3753*4882a593Smuzhiyun {
3754*4882a593Smuzhiyun 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3755*4882a593Smuzhiyun 
3756*4882a593Smuzhiyun 	bfa_trc(sfp, req->memtype);
3757*4882a593Smuzhiyun 
3758*4882a593Smuzhiyun 	/* build host command */
3759*4882a593Smuzhiyun 	bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3760*4882a593Smuzhiyun 			bfa_ioc_portid(sfp->ioc));
3761*4882a593Smuzhiyun 
3762*4882a593Smuzhiyun 	/* send mbox cmd */
3763*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3764*4882a593Smuzhiyun }
3765*4882a593Smuzhiyun 
3766*4882a593Smuzhiyun /*
3767*4882a593Smuzhiyun  *	SFP is valid, read sfp data
3768*4882a593Smuzhiyun  */
3769*4882a593Smuzhiyun static void
bfa_sfp_getdata(struct bfa_sfp_s * sfp,enum bfi_sfp_mem_e memtype)3770*4882a593Smuzhiyun bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3771*4882a593Smuzhiyun {
3772*4882a593Smuzhiyun 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3773*4882a593Smuzhiyun 
3774*4882a593Smuzhiyun 	WARN_ON(sfp->lock != 0);
3775*4882a593Smuzhiyun 	bfa_trc(sfp, sfp->state);
3776*4882a593Smuzhiyun 
3777*4882a593Smuzhiyun 	sfp->lock = 1;
3778*4882a593Smuzhiyun 	sfp->memtype = memtype;
3779*4882a593Smuzhiyun 	req->memtype = memtype;
3780*4882a593Smuzhiyun 
3781*4882a593Smuzhiyun 	/* Setup SG list */
3782*4882a593Smuzhiyun 	bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3783*4882a593Smuzhiyun 
3784*4882a593Smuzhiyun 	bfa_sfp_getdata_send(sfp);
3785*4882a593Smuzhiyun }
3786*4882a593Smuzhiyun 
3787*4882a593Smuzhiyun /*
3788*4882a593Smuzhiyun  *	SFP scn handler
3789*4882a593Smuzhiyun  */
3790*4882a593Smuzhiyun static void
bfa_sfp_scn(struct bfa_sfp_s * sfp,struct bfi_mbmsg_s * msg)3791*4882a593Smuzhiyun bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3792*4882a593Smuzhiyun {
3793*4882a593Smuzhiyun 	struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3794*4882a593Smuzhiyun 
3795*4882a593Smuzhiyun 	switch (rsp->event) {
3796*4882a593Smuzhiyun 	case BFA_SFP_SCN_INSERTED:
3797*4882a593Smuzhiyun 		sfp->state = BFA_SFP_STATE_INSERTED;
3798*4882a593Smuzhiyun 		sfp->data_valid = 0;
3799*4882a593Smuzhiyun 		bfa_sfp_scn_aen_post(sfp, rsp);
3800*4882a593Smuzhiyun 		break;
3801*4882a593Smuzhiyun 	case BFA_SFP_SCN_REMOVED:
3802*4882a593Smuzhiyun 		sfp->state = BFA_SFP_STATE_REMOVED;
3803*4882a593Smuzhiyun 		sfp->data_valid = 0;
3804*4882a593Smuzhiyun 		bfa_sfp_scn_aen_post(sfp, rsp);
3805*4882a593Smuzhiyun 		break;
3806*4882a593Smuzhiyun 	case BFA_SFP_SCN_FAILED:
3807*4882a593Smuzhiyun 		sfp->state = BFA_SFP_STATE_FAILED;
3808*4882a593Smuzhiyun 		sfp->data_valid = 0;
3809*4882a593Smuzhiyun 		bfa_sfp_scn_aen_post(sfp, rsp);
3810*4882a593Smuzhiyun 		break;
3811*4882a593Smuzhiyun 	case BFA_SFP_SCN_UNSUPPORT:
3812*4882a593Smuzhiyun 		sfp->state = BFA_SFP_STATE_UNSUPPORT;
3813*4882a593Smuzhiyun 		bfa_sfp_scn_aen_post(sfp, rsp);
3814*4882a593Smuzhiyun 		if (!sfp->lock)
3815*4882a593Smuzhiyun 			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3816*4882a593Smuzhiyun 		break;
3817*4882a593Smuzhiyun 	case BFA_SFP_SCN_POM:
3818*4882a593Smuzhiyun 		bfa_sfp_scn_aen_post(sfp, rsp);
3819*4882a593Smuzhiyun 		break;
3820*4882a593Smuzhiyun 	case BFA_SFP_SCN_VALID:
3821*4882a593Smuzhiyun 		sfp->state = BFA_SFP_STATE_VALID;
3822*4882a593Smuzhiyun 		if (!sfp->lock)
3823*4882a593Smuzhiyun 			bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3824*4882a593Smuzhiyun 		break;
3825*4882a593Smuzhiyun 	default:
3826*4882a593Smuzhiyun 		bfa_trc(sfp, rsp->event);
3827*4882a593Smuzhiyun 		WARN_ON(1);
3828*4882a593Smuzhiyun 	}
3829*4882a593Smuzhiyun }
3830*4882a593Smuzhiyun 
3831*4882a593Smuzhiyun /*
3832*4882a593Smuzhiyun  * SFP show complete
3833*4882a593Smuzhiyun  */
3834*4882a593Smuzhiyun static void
bfa_sfp_show_comp(struct bfa_sfp_s * sfp,struct bfi_mbmsg_s * msg)3835*4882a593Smuzhiyun bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3836*4882a593Smuzhiyun {
3837*4882a593Smuzhiyun 	struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3838*4882a593Smuzhiyun 
3839*4882a593Smuzhiyun 	if (!sfp->lock) {
3840*4882a593Smuzhiyun 		/*
3841*4882a593Smuzhiyun 		 * receiving response after ioc failure
3842*4882a593Smuzhiyun 		 */
3843*4882a593Smuzhiyun 		bfa_trc(sfp, sfp->lock);
3844*4882a593Smuzhiyun 		return;
3845*4882a593Smuzhiyun 	}
3846*4882a593Smuzhiyun 
3847*4882a593Smuzhiyun 	bfa_trc(sfp, rsp->status);
3848*4882a593Smuzhiyun 	if (rsp->status == BFA_STATUS_OK) {
3849*4882a593Smuzhiyun 		sfp->data_valid = 1;
3850*4882a593Smuzhiyun 		if (sfp->state == BFA_SFP_STATE_VALID)
3851*4882a593Smuzhiyun 			sfp->status = BFA_STATUS_OK;
3852*4882a593Smuzhiyun 		else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3853*4882a593Smuzhiyun 			sfp->status = BFA_STATUS_SFP_UNSUPP;
3854*4882a593Smuzhiyun 		else
3855*4882a593Smuzhiyun 			bfa_trc(sfp, sfp->state);
3856*4882a593Smuzhiyun 	} else {
3857*4882a593Smuzhiyun 		sfp->data_valid = 0;
3858*4882a593Smuzhiyun 		sfp->status = rsp->status;
3859*4882a593Smuzhiyun 		/* sfpshow shouldn't change sfp state */
3860*4882a593Smuzhiyun 	}
3861*4882a593Smuzhiyun 
3862*4882a593Smuzhiyun 	bfa_trc(sfp, sfp->memtype);
3863*4882a593Smuzhiyun 	if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3864*4882a593Smuzhiyun 		bfa_trc(sfp, sfp->data_valid);
3865*4882a593Smuzhiyun 		if (sfp->data_valid) {
3866*4882a593Smuzhiyun 			u32	size = sizeof(struct sfp_mem_s);
3867*4882a593Smuzhiyun 			u8 *des = (u8 *)(sfp->sfpmem);
3868*4882a593Smuzhiyun 			memcpy(des, sfp->dbuf_kva, size);
3869*4882a593Smuzhiyun 		}
3870*4882a593Smuzhiyun 		/*
3871*4882a593Smuzhiyun 		 * Queue completion callback.
3872*4882a593Smuzhiyun 		 */
3873*4882a593Smuzhiyun 		bfa_cb_sfp_show(sfp);
3874*4882a593Smuzhiyun 	} else
3875*4882a593Smuzhiyun 		sfp->lock = 0;
3876*4882a593Smuzhiyun 
3877*4882a593Smuzhiyun 	bfa_trc(sfp, sfp->state_query_lock);
3878*4882a593Smuzhiyun 	if (sfp->state_query_lock) {
3879*4882a593Smuzhiyun 		sfp->state = rsp->state;
3880*4882a593Smuzhiyun 		/* Complete callback */
3881*4882a593Smuzhiyun 		bfa_cb_sfp_state_query(sfp);
3882*4882a593Smuzhiyun 	}
3883*4882a593Smuzhiyun }
3884*4882a593Smuzhiyun 
3885*4882a593Smuzhiyun /*
3886*4882a593Smuzhiyun  *	SFP query fw sfp state
3887*4882a593Smuzhiyun  */
3888*4882a593Smuzhiyun static void
bfa_sfp_state_query(struct bfa_sfp_s * sfp)3889*4882a593Smuzhiyun bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3890*4882a593Smuzhiyun {
3891*4882a593Smuzhiyun 	struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3892*4882a593Smuzhiyun 
3893*4882a593Smuzhiyun 	/* Should not be doing query if not in _INIT state */
3894*4882a593Smuzhiyun 	WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3895*4882a593Smuzhiyun 	WARN_ON(sfp->state_query_lock != 0);
3896*4882a593Smuzhiyun 	bfa_trc(sfp, sfp->state);
3897*4882a593Smuzhiyun 
3898*4882a593Smuzhiyun 	sfp->state_query_lock = 1;
3899*4882a593Smuzhiyun 	req->memtype = 0;
3900*4882a593Smuzhiyun 
3901*4882a593Smuzhiyun 	if (!sfp->lock)
3902*4882a593Smuzhiyun 		bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3903*4882a593Smuzhiyun }
3904*4882a593Smuzhiyun 
3905*4882a593Smuzhiyun static void
bfa_sfp_media_get(struct bfa_sfp_s * sfp)3906*4882a593Smuzhiyun bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3907*4882a593Smuzhiyun {
3908*4882a593Smuzhiyun 	enum bfa_defs_sfp_media_e *media = sfp->media;
3909*4882a593Smuzhiyun 
3910*4882a593Smuzhiyun 	*media = BFA_SFP_MEDIA_UNKNOWN;
3911*4882a593Smuzhiyun 
3912*4882a593Smuzhiyun 	if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3913*4882a593Smuzhiyun 		*media = BFA_SFP_MEDIA_UNSUPPORT;
3914*4882a593Smuzhiyun 	else if (sfp->state == BFA_SFP_STATE_VALID) {
3915*4882a593Smuzhiyun 		union sfp_xcvr_e10g_code_u e10g;
3916*4882a593Smuzhiyun 		struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3917*4882a593Smuzhiyun 		u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3918*4882a593Smuzhiyun 				(sfpmem->srlid_base.xcvr[5] >> 1);
3919*4882a593Smuzhiyun 
3920*4882a593Smuzhiyun 		e10g.b = sfpmem->srlid_base.xcvr[0];
3921*4882a593Smuzhiyun 		bfa_trc(sfp, e10g.b);
3922*4882a593Smuzhiyun 		bfa_trc(sfp, xmtr_tech);
3923*4882a593Smuzhiyun 		/* check fc transmitter tech */
3924*4882a593Smuzhiyun 		if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3925*4882a593Smuzhiyun 		    (xmtr_tech & SFP_XMTR_TECH_CP) ||
3926*4882a593Smuzhiyun 		    (xmtr_tech & SFP_XMTR_TECH_CA))
3927*4882a593Smuzhiyun 			*media = BFA_SFP_MEDIA_CU;
3928*4882a593Smuzhiyun 		else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3929*4882a593Smuzhiyun 			 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3930*4882a593Smuzhiyun 			*media = BFA_SFP_MEDIA_EL;
3931*4882a593Smuzhiyun 		else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3932*4882a593Smuzhiyun 			 (xmtr_tech & SFP_XMTR_TECH_LC))
3933*4882a593Smuzhiyun 			*media = BFA_SFP_MEDIA_LW;
3934*4882a593Smuzhiyun 		else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3935*4882a593Smuzhiyun 			 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3936*4882a593Smuzhiyun 			 (xmtr_tech & SFP_XMTR_TECH_SA))
3937*4882a593Smuzhiyun 			*media = BFA_SFP_MEDIA_SW;
3938*4882a593Smuzhiyun 		/* Check 10G Ethernet Compilance code */
3939*4882a593Smuzhiyun 		else if (e10g.r.e10g_sr)
3940*4882a593Smuzhiyun 			*media = BFA_SFP_MEDIA_SW;
3941*4882a593Smuzhiyun 		else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
3942*4882a593Smuzhiyun 			*media = BFA_SFP_MEDIA_LW;
3943*4882a593Smuzhiyun 		else if (e10g.r.e10g_unall)
3944*4882a593Smuzhiyun 			*media = BFA_SFP_MEDIA_UNKNOWN;
3945*4882a593Smuzhiyun 		else
3946*4882a593Smuzhiyun 			bfa_trc(sfp, 0);
3947*4882a593Smuzhiyun 	} else
3948*4882a593Smuzhiyun 		bfa_trc(sfp, sfp->state);
3949*4882a593Smuzhiyun }
3950*4882a593Smuzhiyun 
3951*4882a593Smuzhiyun static bfa_status_t
bfa_sfp_speed_valid(struct bfa_sfp_s * sfp,enum bfa_port_speed portspeed)3952*4882a593Smuzhiyun bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3953*4882a593Smuzhiyun {
3954*4882a593Smuzhiyun 	struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3955*4882a593Smuzhiyun 	struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3956*4882a593Smuzhiyun 	union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3957*4882a593Smuzhiyun 	union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3958*4882a593Smuzhiyun 
3959*4882a593Smuzhiyun 	if (portspeed == BFA_PORT_SPEED_10GBPS) {
3960*4882a593Smuzhiyun 		if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3961*4882a593Smuzhiyun 			return BFA_STATUS_OK;
3962*4882a593Smuzhiyun 		else {
3963*4882a593Smuzhiyun 			bfa_trc(sfp, e10g.b);
3964*4882a593Smuzhiyun 			return BFA_STATUS_UNSUPP_SPEED;
3965*4882a593Smuzhiyun 		}
3966*4882a593Smuzhiyun 	}
3967*4882a593Smuzhiyun 	if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3968*4882a593Smuzhiyun 	    ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3969*4882a593Smuzhiyun 	    ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3970*4882a593Smuzhiyun 	    ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3971*4882a593Smuzhiyun 	    ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3972*4882a593Smuzhiyun 		return BFA_STATUS_OK;
3973*4882a593Smuzhiyun 	else {
3974*4882a593Smuzhiyun 		bfa_trc(sfp, portspeed);
3975*4882a593Smuzhiyun 		bfa_trc(sfp, fc3.b);
3976*4882a593Smuzhiyun 		bfa_trc(sfp, e10g.b);
3977*4882a593Smuzhiyun 		return BFA_STATUS_UNSUPP_SPEED;
3978*4882a593Smuzhiyun 	}
3979*4882a593Smuzhiyun }
3980*4882a593Smuzhiyun 
3981*4882a593Smuzhiyun /*
3982*4882a593Smuzhiyun  *	SFP hmbox handler
3983*4882a593Smuzhiyun  */
3984*4882a593Smuzhiyun void
bfa_sfp_intr(void * sfparg,struct bfi_mbmsg_s * msg)3985*4882a593Smuzhiyun bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3986*4882a593Smuzhiyun {
3987*4882a593Smuzhiyun 	struct bfa_sfp_s *sfp = sfparg;
3988*4882a593Smuzhiyun 
3989*4882a593Smuzhiyun 	switch (msg->mh.msg_id) {
3990*4882a593Smuzhiyun 	case BFI_SFP_I2H_SHOW:
3991*4882a593Smuzhiyun 		bfa_sfp_show_comp(sfp, msg);
3992*4882a593Smuzhiyun 		break;
3993*4882a593Smuzhiyun 
3994*4882a593Smuzhiyun 	case BFI_SFP_I2H_SCN:
3995*4882a593Smuzhiyun 		bfa_sfp_scn(sfp, msg);
3996*4882a593Smuzhiyun 		break;
3997*4882a593Smuzhiyun 
3998*4882a593Smuzhiyun 	default:
3999*4882a593Smuzhiyun 		bfa_trc(sfp, msg->mh.msg_id);
4000*4882a593Smuzhiyun 		WARN_ON(1);
4001*4882a593Smuzhiyun 	}
4002*4882a593Smuzhiyun }
4003*4882a593Smuzhiyun 
4004*4882a593Smuzhiyun /*
4005*4882a593Smuzhiyun  *	Return DMA memory needed by sfp module.
4006*4882a593Smuzhiyun  */
4007*4882a593Smuzhiyun u32
bfa_sfp_meminfo(void)4008*4882a593Smuzhiyun bfa_sfp_meminfo(void)
4009*4882a593Smuzhiyun {
4010*4882a593Smuzhiyun 	return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4011*4882a593Smuzhiyun }
4012*4882a593Smuzhiyun 
4013*4882a593Smuzhiyun /*
4014*4882a593Smuzhiyun  *	Attach virtual and physical memory for SFP.
4015*4882a593Smuzhiyun  */
4016*4882a593Smuzhiyun void
bfa_sfp_attach(struct bfa_sfp_s * sfp,struct bfa_ioc_s * ioc,void * dev,struct bfa_trc_mod_s * trcmod)4017*4882a593Smuzhiyun bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
4018*4882a593Smuzhiyun 		struct bfa_trc_mod_s *trcmod)
4019*4882a593Smuzhiyun {
4020*4882a593Smuzhiyun 	sfp->dev = dev;
4021*4882a593Smuzhiyun 	sfp->ioc = ioc;
4022*4882a593Smuzhiyun 	sfp->trcmod = trcmod;
4023*4882a593Smuzhiyun 
4024*4882a593Smuzhiyun 	sfp->cbfn = NULL;
4025*4882a593Smuzhiyun 	sfp->cbarg = NULL;
4026*4882a593Smuzhiyun 	sfp->sfpmem = NULL;
4027*4882a593Smuzhiyun 	sfp->lock = 0;
4028*4882a593Smuzhiyun 	sfp->data_valid = 0;
4029*4882a593Smuzhiyun 	sfp->state = BFA_SFP_STATE_INIT;
4030*4882a593Smuzhiyun 	sfp->state_query_lock = 0;
4031*4882a593Smuzhiyun 	sfp->state_query_cbfn = NULL;
4032*4882a593Smuzhiyun 	sfp->state_query_cbarg = NULL;
4033*4882a593Smuzhiyun 	sfp->media = NULL;
4034*4882a593Smuzhiyun 	sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
4035*4882a593Smuzhiyun 	sfp->is_elb = BFA_FALSE;
4036*4882a593Smuzhiyun 
4037*4882a593Smuzhiyun 	bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
4038*4882a593Smuzhiyun 	bfa_q_qe_init(&sfp->ioc_notify);
4039*4882a593Smuzhiyun 	bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
4040*4882a593Smuzhiyun 	list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
4041*4882a593Smuzhiyun }
4042*4882a593Smuzhiyun 
4043*4882a593Smuzhiyun /*
4044*4882a593Smuzhiyun  *	Claim Memory for SFP
4045*4882a593Smuzhiyun  */
4046*4882a593Smuzhiyun void
bfa_sfp_memclaim(struct bfa_sfp_s * sfp,u8 * dm_kva,u64 dm_pa)4047*4882a593Smuzhiyun bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
4048*4882a593Smuzhiyun {
4049*4882a593Smuzhiyun 	sfp->dbuf_kva   = dm_kva;
4050*4882a593Smuzhiyun 	sfp->dbuf_pa    = dm_pa;
4051*4882a593Smuzhiyun 	memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
4052*4882a593Smuzhiyun 
4053*4882a593Smuzhiyun 	dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4054*4882a593Smuzhiyun 	dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
4055*4882a593Smuzhiyun }
4056*4882a593Smuzhiyun 
4057*4882a593Smuzhiyun /*
4058*4882a593Smuzhiyun  * Show SFP eeprom content
4059*4882a593Smuzhiyun  *
4060*4882a593Smuzhiyun  * @param[in] sfp   - bfa sfp module
4061*4882a593Smuzhiyun  *
4062*4882a593Smuzhiyun  * @param[out] sfpmem - sfp eeprom data
4063*4882a593Smuzhiyun  *
4064*4882a593Smuzhiyun  */
4065*4882a593Smuzhiyun bfa_status_t
bfa_sfp_show(struct bfa_sfp_s * sfp,struct sfp_mem_s * sfpmem,bfa_cb_sfp_t cbfn,void * cbarg)4066*4882a593Smuzhiyun bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
4067*4882a593Smuzhiyun 		bfa_cb_sfp_t cbfn, void *cbarg)
4068*4882a593Smuzhiyun {
4069*4882a593Smuzhiyun 
4070*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(sfp->ioc)) {
4071*4882a593Smuzhiyun 		bfa_trc(sfp, 0);
4072*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
4073*4882a593Smuzhiyun 	}
4074*4882a593Smuzhiyun 
4075*4882a593Smuzhiyun 	if (sfp->lock) {
4076*4882a593Smuzhiyun 		bfa_trc(sfp, 0);
4077*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
4078*4882a593Smuzhiyun 	}
4079*4882a593Smuzhiyun 
4080*4882a593Smuzhiyun 	sfp->cbfn = cbfn;
4081*4882a593Smuzhiyun 	sfp->cbarg = cbarg;
4082*4882a593Smuzhiyun 	sfp->sfpmem = sfpmem;
4083*4882a593Smuzhiyun 
4084*4882a593Smuzhiyun 	bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
4085*4882a593Smuzhiyun 	return BFA_STATUS_OK;
4086*4882a593Smuzhiyun }
4087*4882a593Smuzhiyun 
4088*4882a593Smuzhiyun /*
4089*4882a593Smuzhiyun  * Return SFP Media type
4090*4882a593Smuzhiyun  *
4091*4882a593Smuzhiyun  * @param[in] sfp   - bfa sfp module
4092*4882a593Smuzhiyun  *
4093*4882a593Smuzhiyun  * @param[out] media - port speed from user
4094*4882a593Smuzhiyun  *
4095*4882a593Smuzhiyun  */
4096*4882a593Smuzhiyun bfa_status_t
bfa_sfp_media(struct bfa_sfp_s * sfp,enum bfa_defs_sfp_media_e * media,bfa_cb_sfp_t cbfn,void * cbarg)4097*4882a593Smuzhiyun bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
4098*4882a593Smuzhiyun 		bfa_cb_sfp_t cbfn, void *cbarg)
4099*4882a593Smuzhiyun {
4100*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(sfp->ioc)) {
4101*4882a593Smuzhiyun 		bfa_trc(sfp, 0);
4102*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
4103*4882a593Smuzhiyun 	}
4104*4882a593Smuzhiyun 
4105*4882a593Smuzhiyun 	sfp->media = media;
4106*4882a593Smuzhiyun 	if (sfp->state == BFA_SFP_STATE_INIT) {
4107*4882a593Smuzhiyun 		if (sfp->state_query_lock) {
4108*4882a593Smuzhiyun 			bfa_trc(sfp, 0);
4109*4882a593Smuzhiyun 			return BFA_STATUS_DEVBUSY;
4110*4882a593Smuzhiyun 		} else {
4111*4882a593Smuzhiyun 			sfp->state_query_cbfn = cbfn;
4112*4882a593Smuzhiyun 			sfp->state_query_cbarg = cbarg;
4113*4882a593Smuzhiyun 			bfa_sfp_state_query(sfp);
4114*4882a593Smuzhiyun 			return BFA_STATUS_SFP_NOT_READY;
4115*4882a593Smuzhiyun 		}
4116*4882a593Smuzhiyun 	}
4117*4882a593Smuzhiyun 
4118*4882a593Smuzhiyun 	bfa_sfp_media_get(sfp);
4119*4882a593Smuzhiyun 	return BFA_STATUS_OK;
4120*4882a593Smuzhiyun }
4121*4882a593Smuzhiyun 
4122*4882a593Smuzhiyun /*
4123*4882a593Smuzhiyun  * Check if user set port speed is allowed by the SFP
4124*4882a593Smuzhiyun  *
4125*4882a593Smuzhiyun  * @param[in] sfp   - bfa sfp module
4126*4882a593Smuzhiyun  * @param[in] portspeed - port speed from user
4127*4882a593Smuzhiyun  *
4128*4882a593Smuzhiyun  */
4129*4882a593Smuzhiyun bfa_status_t
bfa_sfp_speed(struct bfa_sfp_s * sfp,enum bfa_port_speed portspeed,bfa_cb_sfp_t cbfn,void * cbarg)4130*4882a593Smuzhiyun bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
4131*4882a593Smuzhiyun 		bfa_cb_sfp_t cbfn, void *cbarg)
4132*4882a593Smuzhiyun {
4133*4882a593Smuzhiyun 	WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
4134*4882a593Smuzhiyun 
4135*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(sfp->ioc))
4136*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
4137*4882a593Smuzhiyun 
4138*4882a593Smuzhiyun 	/* For Mezz card, all speed is allowed */
4139*4882a593Smuzhiyun 	if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
4140*4882a593Smuzhiyun 		return BFA_STATUS_OK;
4141*4882a593Smuzhiyun 
4142*4882a593Smuzhiyun 	/* Check SFP state */
4143*4882a593Smuzhiyun 	sfp->portspeed = portspeed;
4144*4882a593Smuzhiyun 	if (sfp->state == BFA_SFP_STATE_INIT) {
4145*4882a593Smuzhiyun 		if (sfp->state_query_lock) {
4146*4882a593Smuzhiyun 			bfa_trc(sfp, 0);
4147*4882a593Smuzhiyun 			return BFA_STATUS_DEVBUSY;
4148*4882a593Smuzhiyun 		} else {
4149*4882a593Smuzhiyun 			sfp->state_query_cbfn = cbfn;
4150*4882a593Smuzhiyun 			sfp->state_query_cbarg = cbarg;
4151*4882a593Smuzhiyun 			bfa_sfp_state_query(sfp);
4152*4882a593Smuzhiyun 			return BFA_STATUS_SFP_NOT_READY;
4153*4882a593Smuzhiyun 		}
4154*4882a593Smuzhiyun 	}
4155*4882a593Smuzhiyun 
4156*4882a593Smuzhiyun 	if (sfp->state == BFA_SFP_STATE_REMOVED ||
4157*4882a593Smuzhiyun 	    sfp->state == BFA_SFP_STATE_FAILED) {
4158*4882a593Smuzhiyun 		bfa_trc(sfp, sfp->state);
4159*4882a593Smuzhiyun 		return BFA_STATUS_NO_SFP_DEV;
4160*4882a593Smuzhiyun 	}
4161*4882a593Smuzhiyun 
4162*4882a593Smuzhiyun 	if (sfp->state == BFA_SFP_STATE_INSERTED) {
4163*4882a593Smuzhiyun 		bfa_trc(sfp, sfp->state);
4164*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;  /* sfp is reading data */
4165*4882a593Smuzhiyun 	}
4166*4882a593Smuzhiyun 
4167*4882a593Smuzhiyun 	/* For eloopback, all speed is allowed */
4168*4882a593Smuzhiyun 	if (sfp->is_elb)
4169*4882a593Smuzhiyun 		return BFA_STATUS_OK;
4170*4882a593Smuzhiyun 
4171*4882a593Smuzhiyun 	return bfa_sfp_speed_valid(sfp, portspeed);
4172*4882a593Smuzhiyun }
4173*4882a593Smuzhiyun 
4174*4882a593Smuzhiyun /*
4175*4882a593Smuzhiyun  *	Flash module specific
4176*4882a593Smuzhiyun  */
4177*4882a593Smuzhiyun 
4178*4882a593Smuzhiyun /*
4179*4882a593Smuzhiyun  * FLASH DMA buffer should be big enough to hold both MFG block and
4180*4882a593Smuzhiyun  * asic block(64k) at the same time and also should be 2k aligned to
4181*4882a593Smuzhiyun  * avoid write segement to cross sector boundary.
4182*4882a593Smuzhiyun  */
4183*4882a593Smuzhiyun #define BFA_FLASH_SEG_SZ	2048
4184*4882a593Smuzhiyun #define BFA_FLASH_DMA_BUF_SZ	\
4185*4882a593Smuzhiyun 	BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
4186*4882a593Smuzhiyun 
4187*4882a593Smuzhiyun static void
bfa_flash_aen_audit_post(struct bfa_ioc_s * ioc,enum bfa_audit_aen_event event,int inst,int type)4188*4882a593Smuzhiyun bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
4189*4882a593Smuzhiyun 			int inst, int type)
4190*4882a593Smuzhiyun {
4191*4882a593Smuzhiyun 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
4192*4882a593Smuzhiyun 	struct bfa_aen_entry_s  *aen_entry;
4193*4882a593Smuzhiyun 
4194*4882a593Smuzhiyun 	bfad_get_aen_entry(bfad, aen_entry);
4195*4882a593Smuzhiyun 	if (!aen_entry)
4196*4882a593Smuzhiyun 		return;
4197*4882a593Smuzhiyun 
4198*4882a593Smuzhiyun 	aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
4199*4882a593Smuzhiyun 	aen_entry->aen_data.audit.partition_inst = inst;
4200*4882a593Smuzhiyun 	aen_entry->aen_data.audit.partition_type = type;
4201*4882a593Smuzhiyun 
4202*4882a593Smuzhiyun 	/* Send the AEN notification */
4203*4882a593Smuzhiyun 	bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
4204*4882a593Smuzhiyun 				  BFA_AEN_CAT_AUDIT, event);
4205*4882a593Smuzhiyun }
4206*4882a593Smuzhiyun 
4207*4882a593Smuzhiyun static void
bfa_flash_cb(struct bfa_flash_s * flash)4208*4882a593Smuzhiyun bfa_flash_cb(struct bfa_flash_s *flash)
4209*4882a593Smuzhiyun {
4210*4882a593Smuzhiyun 	flash->op_busy = 0;
4211*4882a593Smuzhiyun 	if (flash->cbfn)
4212*4882a593Smuzhiyun 		flash->cbfn(flash->cbarg, flash->status);
4213*4882a593Smuzhiyun }
4214*4882a593Smuzhiyun 
4215*4882a593Smuzhiyun static void
bfa_flash_notify(void * cbarg,enum bfa_ioc_event_e event)4216*4882a593Smuzhiyun bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
4217*4882a593Smuzhiyun {
4218*4882a593Smuzhiyun 	struct bfa_flash_s	*flash = cbarg;
4219*4882a593Smuzhiyun 
4220*4882a593Smuzhiyun 	bfa_trc(flash, event);
4221*4882a593Smuzhiyun 	switch (event) {
4222*4882a593Smuzhiyun 	case BFA_IOC_E_DISABLED:
4223*4882a593Smuzhiyun 	case BFA_IOC_E_FAILED:
4224*4882a593Smuzhiyun 		if (flash->op_busy) {
4225*4882a593Smuzhiyun 			flash->status = BFA_STATUS_IOC_FAILURE;
4226*4882a593Smuzhiyun 			flash->cbfn(flash->cbarg, flash->status);
4227*4882a593Smuzhiyun 			flash->op_busy = 0;
4228*4882a593Smuzhiyun 		}
4229*4882a593Smuzhiyun 		break;
4230*4882a593Smuzhiyun 
4231*4882a593Smuzhiyun 	default:
4232*4882a593Smuzhiyun 		break;
4233*4882a593Smuzhiyun 	}
4234*4882a593Smuzhiyun }
4235*4882a593Smuzhiyun 
4236*4882a593Smuzhiyun /*
4237*4882a593Smuzhiyun  * Send flash attribute query request.
4238*4882a593Smuzhiyun  *
4239*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
4240*4882a593Smuzhiyun  */
4241*4882a593Smuzhiyun static void
bfa_flash_query_send(void * cbarg)4242*4882a593Smuzhiyun bfa_flash_query_send(void *cbarg)
4243*4882a593Smuzhiyun {
4244*4882a593Smuzhiyun 	struct bfa_flash_s *flash = cbarg;
4245*4882a593Smuzhiyun 	struct bfi_flash_query_req_s *msg =
4246*4882a593Smuzhiyun 			(struct bfi_flash_query_req_s *) flash->mb.msg;
4247*4882a593Smuzhiyun 
4248*4882a593Smuzhiyun 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4249*4882a593Smuzhiyun 		bfa_ioc_portid(flash->ioc));
4250*4882a593Smuzhiyun 	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4251*4882a593Smuzhiyun 		flash->dbuf_pa);
4252*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4253*4882a593Smuzhiyun }
4254*4882a593Smuzhiyun 
4255*4882a593Smuzhiyun /*
4256*4882a593Smuzhiyun  * Send flash write request.
4257*4882a593Smuzhiyun  *
4258*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
4259*4882a593Smuzhiyun  */
4260*4882a593Smuzhiyun static void
bfa_flash_write_send(struct bfa_flash_s * flash)4261*4882a593Smuzhiyun bfa_flash_write_send(struct bfa_flash_s *flash)
4262*4882a593Smuzhiyun {
4263*4882a593Smuzhiyun 	struct bfi_flash_write_req_s *msg =
4264*4882a593Smuzhiyun 			(struct bfi_flash_write_req_s *) flash->mb.msg;
4265*4882a593Smuzhiyun 	u32	len;
4266*4882a593Smuzhiyun 
4267*4882a593Smuzhiyun 	msg->type = be32_to_cpu(flash->type);
4268*4882a593Smuzhiyun 	msg->instance = flash->instance;
4269*4882a593Smuzhiyun 	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4270*4882a593Smuzhiyun 	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4271*4882a593Smuzhiyun 		flash->residue : BFA_FLASH_DMA_BUF_SZ;
4272*4882a593Smuzhiyun 	msg->length = be32_to_cpu(len);
4273*4882a593Smuzhiyun 
4274*4882a593Smuzhiyun 	/* indicate if it's the last msg of the whole write operation */
4275*4882a593Smuzhiyun 	msg->last = (len == flash->residue) ? 1 : 0;
4276*4882a593Smuzhiyun 
4277*4882a593Smuzhiyun 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4278*4882a593Smuzhiyun 			bfa_ioc_portid(flash->ioc));
4279*4882a593Smuzhiyun 	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4280*4882a593Smuzhiyun 	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4281*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4282*4882a593Smuzhiyun 
4283*4882a593Smuzhiyun 	flash->residue -= len;
4284*4882a593Smuzhiyun 	flash->offset += len;
4285*4882a593Smuzhiyun }
4286*4882a593Smuzhiyun 
4287*4882a593Smuzhiyun /*
4288*4882a593Smuzhiyun  * Send flash read request.
4289*4882a593Smuzhiyun  *
4290*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
4291*4882a593Smuzhiyun  */
4292*4882a593Smuzhiyun static void
bfa_flash_read_send(void * cbarg)4293*4882a593Smuzhiyun bfa_flash_read_send(void *cbarg)
4294*4882a593Smuzhiyun {
4295*4882a593Smuzhiyun 	struct bfa_flash_s *flash = cbarg;
4296*4882a593Smuzhiyun 	struct bfi_flash_read_req_s *msg =
4297*4882a593Smuzhiyun 			(struct bfi_flash_read_req_s *) flash->mb.msg;
4298*4882a593Smuzhiyun 	u32	len;
4299*4882a593Smuzhiyun 
4300*4882a593Smuzhiyun 	msg->type = be32_to_cpu(flash->type);
4301*4882a593Smuzhiyun 	msg->instance = flash->instance;
4302*4882a593Smuzhiyun 	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4303*4882a593Smuzhiyun 	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4304*4882a593Smuzhiyun 			flash->residue : BFA_FLASH_DMA_BUF_SZ;
4305*4882a593Smuzhiyun 	msg->length = be32_to_cpu(len);
4306*4882a593Smuzhiyun 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4307*4882a593Smuzhiyun 		bfa_ioc_portid(flash->ioc));
4308*4882a593Smuzhiyun 	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4309*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4310*4882a593Smuzhiyun }
4311*4882a593Smuzhiyun 
4312*4882a593Smuzhiyun /*
4313*4882a593Smuzhiyun  * Send flash erase request.
4314*4882a593Smuzhiyun  *
4315*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
4316*4882a593Smuzhiyun  */
4317*4882a593Smuzhiyun static void
bfa_flash_erase_send(void * cbarg)4318*4882a593Smuzhiyun bfa_flash_erase_send(void *cbarg)
4319*4882a593Smuzhiyun {
4320*4882a593Smuzhiyun 	struct bfa_flash_s *flash = cbarg;
4321*4882a593Smuzhiyun 	struct bfi_flash_erase_req_s *msg =
4322*4882a593Smuzhiyun 			(struct bfi_flash_erase_req_s *) flash->mb.msg;
4323*4882a593Smuzhiyun 
4324*4882a593Smuzhiyun 	msg->type = be32_to_cpu(flash->type);
4325*4882a593Smuzhiyun 	msg->instance = flash->instance;
4326*4882a593Smuzhiyun 	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4327*4882a593Smuzhiyun 			bfa_ioc_portid(flash->ioc));
4328*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4329*4882a593Smuzhiyun }
4330*4882a593Smuzhiyun 
4331*4882a593Smuzhiyun /*
4332*4882a593Smuzhiyun  * Process flash response messages upon receiving interrupts.
4333*4882a593Smuzhiyun  *
4334*4882a593Smuzhiyun  * @param[in] flasharg - flash structure
4335*4882a593Smuzhiyun  * @param[in] msg - message structure
4336*4882a593Smuzhiyun  */
4337*4882a593Smuzhiyun static void
bfa_flash_intr(void * flasharg,struct bfi_mbmsg_s * msg)4338*4882a593Smuzhiyun bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4339*4882a593Smuzhiyun {
4340*4882a593Smuzhiyun 	struct bfa_flash_s *flash = flasharg;
4341*4882a593Smuzhiyun 	u32	status;
4342*4882a593Smuzhiyun 
4343*4882a593Smuzhiyun 	union {
4344*4882a593Smuzhiyun 		struct bfi_flash_query_rsp_s *query;
4345*4882a593Smuzhiyun 		struct bfi_flash_erase_rsp_s *erase;
4346*4882a593Smuzhiyun 		struct bfi_flash_write_rsp_s *write;
4347*4882a593Smuzhiyun 		struct bfi_flash_read_rsp_s *read;
4348*4882a593Smuzhiyun 		struct bfi_flash_event_s *event;
4349*4882a593Smuzhiyun 		struct bfi_mbmsg_s   *msg;
4350*4882a593Smuzhiyun 	} m;
4351*4882a593Smuzhiyun 
4352*4882a593Smuzhiyun 	m.msg = msg;
4353*4882a593Smuzhiyun 	bfa_trc(flash, msg->mh.msg_id);
4354*4882a593Smuzhiyun 
4355*4882a593Smuzhiyun 	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4356*4882a593Smuzhiyun 		/* receiving response after ioc failure */
4357*4882a593Smuzhiyun 		bfa_trc(flash, 0x9999);
4358*4882a593Smuzhiyun 		return;
4359*4882a593Smuzhiyun 	}
4360*4882a593Smuzhiyun 
4361*4882a593Smuzhiyun 	switch (msg->mh.msg_id) {
4362*4882a593Smuzhiyun 	case BFI_FLASH_I2H_QUERY_RSP:
4363*4882a593Smuzhiyun 		status = be32_to_cpu(m.query->status);
4364*4882a593Smuzhiyun 		bfa_trc(flash, status);
4365*4882a593Smuzhiyun 		if (status == BFA_STATUS_OK) {
4366*4882a593Smuzhiyun 			u32	i;
4367*4882a593Smuzhiyun 			struct bfa_flash_attr_s *attr, *f;
4368*4882a593Smuzhiyun 
4369*4882a593Smuzhiyun 			attr = (struct bfa_flash_attr_s *) flash->ubuf;
4370*4882a593Smuzhiyun 			f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4371*4882a593Smuzhiyun 			attr->status = be32_to_cpu(f->status);
4372*4882a593Smuzhiyun 			attr->npart = be32_to_cpu(f->npart);
4373*4882a593Smuzhiyun 			bfa_trc(flash, attr->status);
4374*4882a593Smuzhiyun 			bfa_trc(flash, attr->npart);
4375*4882a593Smuzhiyun 			for (i = 0; i < attr->npart; i++) {
4376*4882a593Smuzhiyun 				attr->part[i].part_type =
4377*4882a593Smuzhiyun 					be32_to_cpu(f->part[i].part_type);
4378*4882a593Smuzhiyun 				attr->part[i].part_instance =
4379*4882a593Smuzhiyun 					be32_to_cpu(f->part[i].part_instance);
4380*4882a593Smuzhiyun 				attr->part[i].part_off =
4381*4882a593Smuzhiyun 					be32_to_cpu(f->part[i].part_off);
4382*4882a593Smuzhiyun 				attr->part[i].part_size =
4383*4882a593Smuzhiyun 					be32_to_cpu(f->part[i].part_size);
4384*4882a593Smuzhiyun 				attr->part[i].part_len =
4385*4882a593Smuzhiyun 					be32_to_cpu(f->part[i].part_len);
4386*4882a593Smuzhiyun 				attr->part[i].part_status =
4387*4882a593Smuzhiyun 					be32_to_cpu(f->part[i].part_status);
4388*4882a593Smuzhiyun 			}
4389*4882a593Smuzhiyun 		}
4390*4882a593Smuzhiyun 		flash->status = status;
4391*4882a593Smuzhiyun 		bfa_flash_cb(flash);
4392*4882a593Smuzhiyun 		break;
4393*4882a593Smuzhiyun 	case BFI_FLASH_I2H_ERASE_RSP:
4394*4882a593Smuzhiyun 		status = be32_to_cpu(m.erase->status);
4395*4882a593Smuzhiyun 		bfa_trc(flash, status);
4396*4882a593Smuzhiyun 		flash->status = status;
4397*4882a593Smuzhiyun 		bfa_flash_cb(flash);
4398*4882a593Smuzhiyun 		break;
4399*4882a593Smuzhiyun 	case BFI_FLASH_I2H_WRITE_RSP:
4400*4882a593Smuzhiyun 		status = be32_to_cpu(m.write->status);
4401*4882a593Smuzhiyun 		bfa_trc(flash, status);
4402*4882a593Smuzhiyun 		if (status != BFA_STATUS_OK || flash->residue == 0) {
4403*4882a593Smuzhiyun 			flash->status = status;
4404*4882a593Smuzhiyun 			bfa_flash_cb(flash);
4405*4882a593Smuzhiyun 		} else {
4406*4882a593Smuzhiyun 			bfa_trc(flash, flash->offset);
4407*4882a593Smuzhiyun 			bfa_flash_write_send(flash);
4408*4882a593Smuzhiyun 		}
4409*4882a593Smuzhiyun 		break;
4410*4882a593Smuzhiyun 	case BFI_FLASH_I2H_READ_RSP:
4411*4882a593Smuzhiyun 		status = be32_to_cpu(m.read->status);
4412*4882a593Smuzhiyun 		bfa_trc(flash, status);
4413*4882a593Smuzhiyun 		if (status != BFA_STATUS_OK) {
4414*4882a593Smuzhiyun 			flash->status = status;
4415*4882a593Smuzhiyun 			bfa_flash_cb(flash);
4416*4882a593Smuzhiyun 		} else {
4417*4882a593Smuzhiyun 			u32 len = be32_to_cpu(m.read->length);
4418*4882a593Smuzhiyun 			bfa_trc(flash, flash->offset);
4419*4882a593Smuzhiyun 			bfa_trc(flash, len);
4420*4882a593Smuzhiyun 			memcpy(flash->ubuf + flash->offset,
4421*4882a593Smuzhiyun 				flash->dbuf_kva, len);
4422*4882a593Smuzhiyun 			flash->residue -= len;
4423*4882a593Smuzhiyun 			flash->offset += len;
4424*4882a593Smuzhiyun 			if (flash->residue == 0) {
4425*4882a593Smuzhiyun 				flash->status = status;
4426*4882a593Smuzhiyun 				bfa_flash_cb(flash);
4427*4882a593Smuzhiyun 			} else
4428*4882a593Smuzhiyun 				bfa_flash_read_send(flash);
4429*4882a593Smuzhiyun 		}
4430*4882a593Smuzhiyun 		break;
4431*4882a593Smuzhiyun 	case BFI_FLASH_I2H_BOOT_VER_RSP:
4432*4882a593Smuzhiyun 		break;
4433*4882a593Smuzhiyun 	case BFI_FLASH_I2H_EVENT:
4434*4882a593Smuzhiyun 		status = be32_to_cpu(m.event->status);
4435*4882a593Smuzhiyun 		bfa_trc(flash, status);
4436*4882a593Smuzhiyun 		if (status == BFA_STATUS_BAD_FWCFG)
4437*4882a593Smuzhiyun 			bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4438*4882a593Smuzhiyun 		else if (status == BFA_STATUS_INVALID_VENDOR) {
4439*4882a593Smuzhiyun 			u32 param;
4440*4882a593Smuzhiyun 			param = be32_to_cpu(m.event->param);
4441*4882a593Smuzhiyun 			bfa_trc(flash, param);
4442*4882a593Smuzhiyun 			bfa_ioc_aen_post(flash->ioc,
4443*4882a593Smuzhiyun 				BFA_IOC_AEN_INVALID_VENDOR);
4444*4882a593Smuzhiyun 		}
4445*4882a593Smuzhiyun 		break;
4446*4882a593Smuzhiyun 
4447*4882a593Smuzhiyun 	default:
4448*4882a593Smuzhiyun 		WARN_ON(1);
4449*4882a593Smuzhiyun 	}
4450*4882a593Smuzhiyun }
4451*4882a593Smuzhiyun 
4452*4882a593Smuzhiyun /*
4453*4882a593Smuzhiyun  * Flash memory info API.
4454*4882a593Smuzhiyun  *
4455*4882a593Smuzhiyun  * @param[in] mincfg - minimal cfg variable
4456*4882a593Smuzhiyun  */
4457*4882a593Smuzhiyun u32
bfa_flash_meminfo(bfa_boolean_t mincfg)4458*4882a593Smuzhiyun bfa_flash_meminfo(bfa_boolean_t mincfg)
4459*4882a593Smuzhiyun {
4460*4882a593Smuzhiyun 	/* min driver doesn't need flash */
4461*4882a593Smuzhiyun 	if (mincfg)
4462*4882a593Smuzhiyun 		return 0;
4463*4882a593Smuzhiyun 	return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4464*4882a593Smuzhiyun }
4465*4882a593Smuzhiyun 
4466*4882a593Smuzhiyun /*
4467*4882a593Smuzhiyun  * Flash attach API.
4468*4882a593Smuzhiyun  *
4469*4882a593Smuzhiyun  * @param[in] flash - flash structure
4470*4882a593Smuzhiyun  * @param[in] ioc  - ioc structure
4471*4882a593Smuzhiyun  * @param[in] dev  - device structure
4472*4882a593Smuzhiyun  * @param[in] trcmod - trace module
4473*4882a593Smuzhiyun  * @param[in] logmod - log module
4474*4882a593Smuzhiyun  */
4475*4882a593Smuzhiyun void
bfa_flash_attach(struct bfa_flash_s * flash,struct bfa_ioc_s * ioc,void * dev,struct bfa_trc_mod_s * trcmod,bfa_boolean_t mincfg)4476*4882a593Smuzhiyun bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4477*4882a593Smuzhiyun 		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4478*4882a593Smuzhiyun {
4479*4882a593Smuzhiyun 	flash->ioc = ioc;
4480*4882a593Smuzhiyun 	flash->trcmod = trcmod;
4481*4882a593Smuzhiyun 	flash->cbfn = NULL;
4482*4882a593Smuzhiyun 	flash->cbarg = NULL;
4483*4882a593Smuzhiyun 	flash->op_busy = 0;
4484*4882a593Smuzhiyun 
4485*4882a593Smuzhiyun 	bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4486*4882a593Smuzhiyun 	bfa_q_qe_init(&flash->ioc_notify);
4487*4882a593Smuzhiyun 	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4488*4882a593Smuzhiyun 	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4489*4882a593Smuzhiyun 
4490*4882a593Smuzhiyun 	/* min driver doesn't need flash */
4491*4882a593Smuzhiyun 	if (mincfg) {
4492*4882a593Smuzhiyun 		flash->dbuf_kva = NULL;
4493*4882a593Smuzhiyun 		flash->dbuf_pa = 0;
4494*4882a593Smuzhiyun 	}
4495*4882a593Smuzhiyun }
4496*4882a593Smuzhiyun 
4497*4882a593Smuzhiyun /*
4498*4882a593Smuzhiyun  * Claim memory for flash
4499*4882a593Smuzhiyun  *
4500*4882a593Smuzhiyun  * @param[in] flash - flash structure
4501*4882a593Smuzhiyun  * @param[in] dm_kva - pointer to virtual memory address
4502*4882a593Smuzhiyun  * @param[in] dm_pa - physical memory address
4503*4882a593Smuzhiyun  * @param[in] mincfg - minimal cfg variable
4504*4882a593Smuzhiyun  */
4505*4882a593Smuzhiyun void
bfa_flash_memclaim(struct bfa_flash_s * flash,u8 * dm_kva,u64 dm_pa,bfa_boolean_t mincfg)4506*4882a593Smuzhiyun bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4507*4882a593Smuzhiyun 		bfa_boolean_t mincfg)
4508*4882a593Smuzhiyun {
4509*4882a593Smuzhiyun 	if (mincfg)
4510*4882a593Smuzhiyun 		return;
4511*4882a593Smuzhiyun 
4512*4882a593Smuzhiyun 	flash->dbuf_kva = dm_kva;
4513*4882a593Smuzhiyun 	flash->dbuf_pa = dm_pa;
4514*4882a593Smuzhiyun 	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4515*4882a593Smuzhiyun 	dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4516*4882a593Smuzhiyun 	dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4517*4882a593Smuzhiyun }
4518*4882a593Smuzhiyun 
4519*4882a593Smuzhiyun /*
4520*4882a593Smuzhiyun  * Get flash attribute.
4521*4882a593Smuzhiyun  *
4522*4882a593Smuzhiyun  * @param[in] flash - flash structure
4523*4882a593Smuzhiyun  * @param[in] attr - flash attribute structure
4524*4882a593Smuzhiyun  * @param[in] cbfn - callback function
4525*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
4526*4882a593Smuzhiyun  *
4527*4882a593Smuzhiyun  * Return status.
4528*4882a593Smuzhiyun  */
4529*4882a593Smuzhiyun bfa_status_t
bfa_flash_get_attr(struct bfa_flash_s * flash,struct bfa_flash_attr_s * attr,bfa_cb_flash_t cbfn,void * cbarg)4530*4882a593Smuzhiyun bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4531*4882a593Smuzhiyun 		bfa_cb_flash_t cbfn, void *cbarg)
4532*4882a593Smuzhiyun {
4533*4882a593Smuzhiyun 	bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4534*4882a593Smuzhiyun 
4535*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(flash->ioc))
4536*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
4537*4882a593Smuzhiyun 
4538*4882a593Smuzhiyun 	if (flash->op_busy) {
4539*4882a593Smuzhiyun 		bfa_trc(flash, flash->op_busy);
4540*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
4541*4882a593Smuzhiyun 	}
4542*4882a593Smuzhiyun 
4543*4882a593Smuzhiyun 	flash->op_busy = 1;
4544*4882a593Smuzhiyun 	flash->cbfn = cbfn;
4545*4882a593Smuzhiyun 	flash->cbarg = cbarg;
4546*4882a593Smuzhiyun 	flash->ubuf = (u8 *) attr;
4547*4882a593Smuzhiyun 	bfa_flash_query_send(flash);
4548*4882a593Smuzhiyun 
4549*4882a593Smuzhiyun 	return BFA_STATUS_OK;
4550*4882a593Smuzhiyun }
4551*4882a593Smuzhiyun 
4552*4882a593Smuzhiyun /*
4553*4882a593Smuzhiyun  * Erase flash partition.
4554*4882a593Smuzhiyun  *
4555*4882a593Smuzhiyun  * @param[in] flash - flash structure
4556*4882a593Smuzhiyun  * @param[in] type - flash partition type
4557*4882a593Smuzhiyun  * @param[in] instance - flash partition instance
4558*4882a593Smuzhiyun  * @param[in] cbfn - callback function
4559*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
4560*4882a593Smuzhiyun  *
4561*4882a593Smuzhiyun  * Return status.
4562*4882a593Smuzhiyun  */
4563*4882a593Smuzhiyun bfa_status_t
bfa_flash_erase_part(struct bfa_flash_s * flash,enum bfa_flash_part_type type,u8 instance,bfa_cb_flash_t cbfn,void * cbarg)4564*4882a593Smuzhiyun bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4565*4882a593Smuzhiyun 		u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4566*4882a593Smuzhiyun {
4567*4882a593Smuzhiyun 	bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4568*4882a593Smuzhiyun 	bfa_trc(flash, type);
4569*4882a593Smuzhiyun 	bfa_trc(flash, instance);
4570*4882a593Smuzhiyun 
4571*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(flash->ioc))
4572*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
4573*4882a593Smuzhiyun 
4574*4882a593Smuzhiyun 	if (flash->op_busy) {
4575*4882a593Smuzhiyun 		bfa_trc(flash, flash->op_busy);
4576*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
4577*4882a593Smuzhiyun 	}
4578*4882a593Smuzhiyun 
4579*4882a593Smuzhiyun 	flash->op_busy = 1;
4580*4882a593Smuzhiyun 	flash->cbfn = cbfn;
4581*4882a593Smuzhiyun 	flash->cbarg = cbarg;
4582*4882a593Smuzhiyun 	flash->type = type;
4583*4882a593Smuzhiyun 	flash->instance = instance;
4584*4882a593Smuzhiyun 
4585*4882a593Smuzhiyun 	bfa_flash_erase_send(flash);
4586*4882a593Smuzhiyun 	bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4587*4882a593Smuzhiyun 				instance, type);
4588*4882a593Smuzhiyun 	return BFA_STATUS_OK;
4589*4882a593Smuzhiyun }
4590*4882a593Smuzhiyun 
4591*4882a593Smuzhiyun /*
4592*4882a593Smuzhiyun  * Update flash partition.
4593*4882a593Smuzhiyun  *
4594*4882a593Smuzhiyun  * @param[in] flash - flash structure
4595*4882a593Smuzhiyun  * @param[in] type - flash partition type
4596*4882a593Smuzhiyun  * @param[in] instance - flash partition instance
4597*4882a593Smuzhiyun  * @param[in] buf - update data buffer
4598*4882a593Smuzhiyun  * @param[in] len - data buffer length
4599*4882a593Smuzhiyun  * @param[in] offset - offset relative to the partition starting address
4600*4882a593Smuzhiyun  * @param[in] cbfn - callback function
4601*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
4602*4882a593Smuzhiyun  *
4603*4882a593Smuzhiyun  * Return status.
4604*4882a593Smuzhiyun  */
4605*4882a593Smuzhiyun bfa_status_t
bfa_flash_update_part(struct bfa_flash_s * flash,enum bfa_flash_part_type type,u8 instance,void * buf,u32 len,u32 offset,bfa_cb_flash_t cbfn,void * cbarg)4606*4882a593Smuzhiyun bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4607*4882a593Smuzhiyun 		u8 instance, void *buf, u32 len, u32 offset,
4608*4882a593Smuzhiyun 		bfa_cb_flash_t cbfn, void *cbarg)
4609*4882a593Smuzhiyun {
4610*4882a593Smuzhiyun 	bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4611*4882a593Smuzhiyun 	bfa_trc(flash, type);
4612*4882a593Smuzhiyun 	bfa_trc(flash, instance);
4613*4882a593Smuzhiyun 	bfa_trc(flash, len);
4614*4882a593Smuzhiyun 	bfa_trc(flash, offset);
4615*4882a593Smuzhiyun 
4616*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(flash->ioc))
4617*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
4618*4882a593Smuzhiyun 
4619*4882a593Smuzhiyun 	/*
4620*4882a593Smuzhiyun 	 * 'len' must be in word (4-byte) boundary
4621*4882a593Smuzhiyun 	 * 'offset' must be in sector (16kb) boundary
4622*4882a593Smuzhiyun 	 */
4623*4882a593Smuzhiyun 	if (!len || (len & 0x03) || (offset & 0x00003FFF))
4624*4882a593Smuzhiyun 		return BFA_STATUS_FLASH_BAD_LEN;
4625*4882a593Smuzhiyun 
4626*4882a593Smuzhiyun 	if (type == BFA_FLASH_PART_MFG)
4627*4882a593Smuzhiyun 		return BFA_STATUS_EINVAL;
4628*4882a593Smuzhiyun 
4629*4882a593Smuzhiyun 	if (flash->op_busy) {
4630*4882a593Smuzhiyun 		bfa_trc(flash, flash->op_busy);
4631*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
4632*4882a593Smuzhiyun 	}
4633*4882a593Smuzhiyun 
4634*4882a593Smuzhiyun 	flash->op_busy = 1;
4635*4882a593Smuzhiyun 	flash->cbfn = cbfn;
4636*4882a593Smuzhiyun 	flash->cbarg = cbarg;
4637*4882a593Smuzhiyun 	flash->type = type;
4638*4882a593Smuzhiyun 	flash->instance = instance;
4639*4882a593Smuzhiyun 	flash->residue = len;
4640*4882a593Smuzhiyun 	flash->offset = 0;
4641*4882a593Smuzhiyun 	flash->addr_off = offset;
4642*4882a593Smuzhiyun 	flash->ubuf = buf;
4643*4882a593Smuzhiyun 
4644*4882a593Smuzhiyun 	bfa_flash_write_send(flash);
4645*4882a593Smuzhiyun 	return BFA_STATUS_OK;
4646*4882a593Smuzhiyun }
4647*4882a593Smuzhiyun 
4648*4882a593Smuzhiyun /*
4649*4882a593Smuzhiyun  * Read flash partition.
4650*4882a593Smuzhiyun  *
4651*4882a593Smuzhiyun  * @param[in] flash - flash structure
4652*4882a593Smuzhiyun  * @param[in] type - flash partition type
4653*4882a593Smuzhiyun  * @param[in] instance - flash partition instance
4654*4882a593Smuzhiyun  * @param[in] buf - read data buffer
4655*4882a593Smuzhiyun  * @param[in] len - data buffer length
4656*4882a593Smuzhiyun  * @param[in] offset - offset relative to the partition starting address
4657*4882a593Smuzhiyun  * @param[in] cbfn - callback function
4658*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
4659*4882a593Smuzhiyun  *
4660*4882a593Smuzhiyun  * Return status.
4661*4882a593Smuzhiyun  */
4662*4882a593Smuzhiyun bfa_status_t
bfa_flash_read_part(struct bfa_flash_s * flash,enum bfa_flash_part_type type,u8 instance,void * buf,u32 len,u32 offset,bfa_cb_flash_t cbfn,void * cbarg)4663*4882a593Smuzhiyun bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4664*4882a593Smuzhiyun 		u8 instance, void *buf, u32 len, u32 offset,
4665*4882a593Smuzhiyun 		bfa_cb_flash_t cbfn, void *cbarg)
4666*4882a593Smuzhiyun {
4667*4882a593Smuzhiyun 	bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4668*4882a593Smuzhiyun 	bfa_trc(flash, type);
4669*4882a593Smuzhiyun 	bfa_trc(flash, instance);
4670*4882a593Smuzhiyun 	bfa_trc(flash, len);
4671*4882a593Smuzhiyun 	bfa_trc(flash, offset);
4672*4882a593Smuzhiyun 
4673*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(flash->ioc))
4674*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
4675*4882a593Smuzhiyun 
4676*4882a593Smuzhiyun 	/*
4677*4882a593Smuzhiyun 	 * 'len' must be in word (4-byte) boundary
4678*4882a593Smuzhiyun 	 * 'offset' must be in sector (16kb) boundary
4679*4882a593Smuzhiyun 	 */
4680*4882a593Smuzhiyun 	if (!len || (len & 0x03) || (offset & 0x00003FFF))
4681*4882a593Smuzhiyun 		return BFA_STATUS_FLASH_BAD_LEN;
4682*4882a593Smuzhiyun 
4683*4882a593Smuzhiyun 	if (flash->op_busy) {
4684*4882a593Smuzhiyun 		bfa_trc(flash, flash->op_busy);
4685*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
4686*4882a593Smuzhiyun 	}
4687*4882a593Smuzhiyun 
4688*4882a593Smuzhiyun 	flash->op_busy = 1;
4689*4882a593Smuzhiyun 	flash->cbfn = cbfn;
4690*4882a593Smuzhiyun 	flash->cbarg = cbarg;
4691*4882a593Smuzhiyun 	flash->type = type;
4692*4882a593Smuzhiyun 	flash->instance = instance;
4693*4882a593Smuzhiyun 	flash->residue = len;
4694*4882a593Smuzhiyun 	flash->offset = 0;
4695*4882a593Smuzhiyun 	flash->addr_off = offset;
4696*4882a593Smuzhiyun 	flash->ubuf = buf;
4697*4882a593Smuzhiyun 	bfa_flash_read_send(flash);
4698*4882a593Smuzhiyun 
4699*4882a593Smuzhiyun 	return BFA_STATUS_OK;
4700*4882a593Smuzhiyun }
4701*4882a593Smuzhiyun 
4702*4882a593Smuzhiyun /*
4703*4882a593Smuzhiyun  *	DIAG module specific
4704*4882a593Smuzhiyun  */
4705*4882a593Smuzhiyun 
4706*4882a593Smuzhiyun #define BFA_DIAG_MEMTEST_TOV	50000	/* memtest timeout in msec */
4707*4882a593Smuzhiyun #define CT2_BFA_DIAG_MEMTEST_TOV	(9*30*1000)  /* 4.5 min */
4708*4882a593Smuzhiyun 
4709*4882a593Smuzhiyun /* IOC event handler */
4710*4882a593Smuzhiyun static void
bfa_diag_notify(void * diag_arg,enum bfa_ioc_event_e event)4711*4882a593Smuzhiyun bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4712*4882a593Smuzhiyun {
4713*4882a593Smuzhiyun 	struct bfa_diag_s *diag = diag_arg;
4714*4882a593Smuzhiyun 
4715*4882a593Smuzhiyun 	bfa_trc(diag, event);
4716*4882a593Smuzhiyun 	bfa_trc(diag, diag->block);
4717*4882a593Smuzhiyun 	bfa_trc(diag, diag->fwping.lock);
4718*4882a593Smuzhiyun 	bfa_trc(diag, diag->tsensor.lock);
4719*4882a593Smuzhiyun 
4720*4882a593Smuzhiyun 	switch (event) {
4721*4882a593Smuzhiyun 	case BFA_IOC_E_DISABLED:
4722*4882a593Smuzhiyun 	case BFA_IOC_E_FAILED:
4723*4882a593Smuzhiyun 		if (diag->fwping.lock) {
4724*4882a593Smuzhiyun 			diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4725*4882a593Smuzhiyun 			diag->fwping.cbfn(diag->fwping.cbarg,
4726*4882a593Smuzhiyun 					diag->fwping.status);
4727*4882a593Smuzhiyun 			diag->fwping.lock = 0;
4728*4882a593Smuzhiyun 		}
4729*4882a593Smuzhiyun 
4730*4882a593Smuzhiyun 		if (diag->tsensor.lock) {
4731*4882a593Smuzhiyun 			diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4732*4882a593Smuzhiyun 			diag->tsensor.cbfn(diag->tsensor.cbarg,
4733*4882a593Smuzhiyun 					   diag->tsensor.status);
4734*4882a593Smuzhiyun 			diag->tsensor.lock = 0;
4735*4882a593Smuzhiyun 		}
4736*4882a593Smuzhiyun 
4737*4882a593Smuzhiyun 		if (diag->block) {
4738*4882a593Smuzhiyun 			if (diag->timer_active) {
4739*4882a593Smuzhiyun 				bfa_timer_stop(&diag->timer);
4740*4882a593Smuzhiyun 				diag->timer_active = 0;
4741*4882a593Smuzhiyun 			}
4742*4882a593Smuzhiyun 
4743*4882a593Smuzhiyun 			diag->status = BFA_STATUS_IOC_FAILURE;
4744*4882a593Smuzhiyun 			diag->cbfn(diag->cbarg, diag->status);
4745*4882a593Smuzhiyun 			diag->block = 0;
4746*4882a593Smuzhiyun 		}
4747*4882a593Smuzhiyun 		break;
4748*4882a593Smuzhiyun 
4749*4882a593Smuzhiyun 	default:
4750*4882a593Smuzhiyun 		break;
4751*4882a593Smuzhiyun 	}
4752*4882a593Smuzhiyun }
4753*4882a593Smuzhiyun 
4754*4882a593Smuzhiyun static void
bfa_diag_memtest_done(void * cbarg)4755*4882a593Smuzhiyun bfa_diag_memtest_done(void *cbarg)
4756*4882a593Smuzhiyun {
4757*4882a593Smuzhiyun 	struct bfa_diag_s *diag = cbarg;
4758*4882a593Smuzhiyun 	struct bfa_ioc_s  *ioc = diag->ioc;
4759*4882a593Smuzhiyun 	struct bfa_diag_memtest_result *res = diag->result;
4760*4882a593Smuzhiyun 	u32	loff = BFI_BOOT_MEMTEST_RES_ADDR;
4761*4882a593Smuzhiyun 	u32	pgnum, i;
4762*4882a593Smuzhiyun 
4763*4882a593Smuzhiyun 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4764*4882a593Smuzhiyun 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4765*4882a593Smuzhiyun 
4766*4882a593Smuzhiyun 	for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4767*4882a593Smuzhiyun 			 sizeof(u32)); i++) {
4768*4882a593Smuzhiyun 		/* read test result from smem */
4769*4882a593Smuzhiyun 		*((u32 *) res + i) =
4770*4882a593Smuzhiyun 			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4771*4882a593Smuzhiyun 		loff += sizeof(u32);
4772*4882a593Smuzhiyun 	}
4773*4882a593Smuzhiyun 
4774*4882a593Smuzhiyun 	/* Reset IOC fwstates to BFI_IOC_UNINIT */
4775*4882a593Smuzhiyun 	bfa_ioc_reset_fwstate(ioc);
4776*4882a593Smuzhiyun 
4777*4882a593Smuzhiyun 	res->status = swab32(res->status);
4778*4882a593Smuzhiyun 	bfa_trc(diag, res->status);
4779*4882a593Smuzhiyun 
4780*4882a593Smuzhiyun 	if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4781*4882a593Smuzhiyun 		diag->status = BFA_STATUS_OK;
4782*4882a593Smuzhiyun 	else {
4783*4882a593Smuzhiyun 		diag->status = BFA_STATUS_MEMTEST_FAILED;
4784*4882a593Smuzhiyun 		res->addr = swab32(res->addr);
4785*4882a593Smuzhiyun 		res->exp = swab32(res->exp);
4786*4882a593Smuzhiyun 		res->act = swab32(res->act);
4787*4882a593Smuzhiyun 		res->err_status = swab32(res->err_status);
4788*4882a593Smuzhiyun 		res->err_status1 = swab32(res->err_status1);
4789*4882a593Smuzhiyun 		res->err_addr = swab32(res->err_addr);
4790*4882a593Smuzhiyun 		bfa_trc(diag, res->addr);
4791*4882a593Smuzhiyun 		bfa_trc(diag, res->exp);
4792*4882a593Smuzhiyun 		bfa_trc(diag, res->act);
4793*4882a593Smuzhiyun 		bfa_trc(diag, res->err_status);
4794*4882a593Smuzhiyun 		bfa_trc(diag, res->err_status1);
4795*4882a593Smuzhiyun 		bfa_trc(diag, res->err_addr);
4796*4882a593Smuzhiyun 	}
4797*4882a593Smuzhiyun 	diag->timer_active = 0;
4798*4882a593Smuzhiyun 	diag->cbfn(diag->cbarg, diag->status);
4799*4882a593Smuzhiyun 	diag->block = 0;
4800*4882a593Smuzhiyun }
4801*4882a593Smuzhiyun 
4802*4882a593Smuzhiyun /*
4803*4882a593Smuzhiyun  * Firmware ping
4804*4882a593Smuzhiyun  */
4805*4882a593Smuzhiyun 
4806*4882a593Smuzhiyun /*
4807*4882a593Smuzhiyun  * Perform DMA test directly
4808*4882a593Smuzhiyun  */
4809*4882a593Smuzhiyun static void
diag_fwping_send(struct bfa_diag_s * diag)4810*4882a593Smuzhiyun diag_fwping_send(struct bfa_diag_s *diag)
4811*4882a593Smuzhiyun {
4812*4882a593Smuzhiyun 	struct bfi_diag_fwping_req_s *fwping_req;
4813*4882a593Smuzhiyun 	u32	i;
4814*4882a593Smuzhiyun 
4815*4882a593Smuzhiyun 	bfa_trc(diag, diag->fwping.dbuf_pa);
4816*4882a593Smuzhiyun 
4817*4882a593Smuzhiyun 	/* fill DMA area with pattern */
4818*4882a593Smuzhiyun 	for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4819*4882a593Smuzhiyun 		*((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4820*4882a593Smuzhiyun 
4821*4882a593Smuzhiyun 	/* Fill mbox msg */
4822*4882a593Smuzhiyun 	fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4823*4882a593Smuzhiyun 
4824*4882a593Smuzhiyun 	/* Setup SG list */
4825*4882a593Smuzhiyun 	bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4826*4882a593Smuzhiyun 			diag->fwping.dbuf_pa);
4827*4882a593Smuzhiyun 	/* Set up dma count */
4828*4882a593Smuzhiyun 	fwping_req->count = cpu_to_be32(diag->fwping.count);
4829*4882a593Smuzhiyun 	/* Set up data pattern */
4830*4882a593Smuzhiyun 	fwping_req->data = diag->fwping.data;
4831*4882a593Smuzhiyun 
4832*4882a593Smuzhiyun 	/* build host command */
4833*4882a593Smuzhiyun 	bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4834*4882a593Smuzhiyun 		bfa_ioc_portid(diag->ioc));
4835*4882a593Smuzhiyun 
4836*4882a593Smuzhiyun 	/* send mbox cmd */
4837*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4838*4882a593Smuzhiyun }
4839*4882a593Smuzhiyun 
4840*4882a593Smuzhiyun static void
diag_fwping_comp(struct bfa_diag_s * diag,struct bfi_diag_fwping_rsp_s * diag_rsp)4841*4882a593Smuzhiyun diag_fwping_comp(struct bfa_diag_s *diag,
4842*4882a593Smuzhiyun 		 struct bfi_diag_fwping_rsp_s *diag_rsp)
4843*4882a593Smuzhiyun {
4844*4882a593Smuzhiyun 	u32	rsp_data = diag_rsp->data;
4845*4882a593Smuzhiyun 	u8	rsp_dma_status = diag_rsp->dma_status;
4846*4882a593Smuzhiyun 
4847*4882a593Smuzhiyun 	bfa_trc(diag, rsp_data);
4848*4882a593Smuzhiyun 	bfa_trc(diag, rsp_dma_status);
4849*4882a593Smuzhiyun 
4850*4882a593Smuzhiyun 	if (rsp_dma_status == BFA_STATUS_OK) {
4851*4882a593Smuzhiyun 		u32	i, pat;
4852*4882a593Smuzhiyun 		pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4853*4882a593Smuzhiyun 			diag->fwping.data;
4854*4882a593Smuzhiyun 		/* Check mbox data */
4855*4882a593Smuzhiyun 		if (diag->fwping.data != rsp_data) {
4856*4882a593Smuzhiyun 			bfa_trc(diag, rsp_data);
4857*4882a593Smuzhiyun 			diag->fwping.result->dmastatus =
4858*4882a593Smuzhiyun 					BFA_STATUS_DATACORRUPTED;
4859*4882a593Smuzhiyun 			diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4860*4882a593Smuzhiyun 			diag->fwping.cbfn(diag->fwping.cbarg,
4861*4882a593Smuzhiyun 					diag->fwping.status);
4862*4882a593Smuzhiyun 			diag->fwping.lock = 0;
4863*4882a593Smuzhiyun 			return;
4864*4882a593Smuzhiyun 		}
4865*4882a593Smuzhiyun 		/* Check dma pattern */
4866*4882a593Smuzhiyun 		for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4867*4882a593Smuzhiyun 			if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4868*4882a593Smuzhiyun 				bfa_trc(diag, i);
4869*4882a593Smuzhiyun 				bfa_trc(diag, pat);
4870*4882a593Smuzhiyun 				bfa_trc(diag,
4871*4882a593Smuzhiyun 					*((u32 *)diag->fwping.dbuf_kva + i));
4872*4882a593Smuzhiyun 				diag->fwping.result->dmastatus =
4873*4882a593Smuzhiyun 						BFA_STATUS_DATACORRUPTED;
4874*4882a593Smuzhiyun 				diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4875*4882a593Smuzhiyun 				diag->fwping.cbfn(diag->fwping.cbarg,
4876*4882a593Smuzhiyun 						diag->fwping.status);
4877*4882a593Smuzhiyun 				diag->fwping.lock = 0;
4878*4882a593Smuzhiyun 				return;
4879*4882a593Smuzhiyun 			}
4880*4882a593Smuzhiyun 		}
4881*4882a593Smuzhiyun 		diag->fwping.result->dmastatus = BFA_STATUS_OK;
4882*4882a593Smuzhiyun 		diag->fwping.status = BFA_STATUS_OK;
4883*4882a593Smuzhiyun 		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4884*4882a593Smuzhiyun 		diag->fwping.lock = 0;
4885*4882a593Smuzhiyun 	} else {
4886*4882a593Smuzhiyun 		diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4887*4882a593Smuzhiyun 		diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4888*4882a593Smuzhiyun 		diag->fwping.lock = 0;
4889*4882a593Smuzhiyun 	}
4890*4882a593Smuzhiyun }
4891*4882a593Smuzhiyun 
4892*4882a593Smuzhiyun /*
4893*4882a593Smuzhiyun  * Temperature Sensor
4894*4882a593Smuzhiyun  */
4895*4882a593Smuzhiyun 
4896*4882a593Smuzhiyun static void
diag_tempsensor_send(struct bfa_diag_s * diag)4897*4882a593Smuzhiyun diag_tempsensor_send(struct bfa_diag_s *diag)
4898*4882a593Smuzhiyun {
4899*4882a593Smuzhiyun 	struct bfi_diag_ts_req_s *msg;
4900*4882a593Smuzhiyun 
4901*4882a593Smuzhiyun 	msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4902*4882a593Smuzhiyun 	bfa_trc(diag, msg->temp);
4903*4882a593Smuzhiyun 	/* build host command */
4904*4882a593Smuzhiyun 	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4905*4882a593Smuzhiyun 		bfa_ioc_portid(diag->ioc));
4906*4882a593Smuzhiyun 	/* send mbox cmd */
4907*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4908*4882a593Smuzhiyun }
4909*4882a593Smuzhiyun 
4910*4882a593Smuzhiyun static void
diag_tempsensor_comp(struct bfa_diag_s * diag,bfi_diag_ts_rsp_t * rsp)4911*4882a593Smuzhiyun diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4912*4882a593Smuzhiyun {
4913*4882a593Smuzhiyun 	if (!diag->tsensor.lock) {
4914*4882a593Smuzhiyun 		/* receiving response after ioc failure */
4915*4882a593Smuzhiyun 		bfa_trc(diag, diag->tsensor.lock);
4916*4882a593Smuzhiyun 		return;
4917*4882a593Smuzhiyun 	}
4918*4882a593Smuzhiyun 
4919*4882a593Smuzhiyun 	/*
4920*4882a593Smuzhiyun 	 * ASIC junction tempsensor is a reg read operation
4921*4882a593Smuzhiyun 	 * it will always return OK
4922*4882a593Smuzhiyun 	 */
4923*4882a593Smuzhiyun 	diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4924*4882a593Smuzhiyun 	diag->tsensor.temp->ts_junc = rsp->ts_junc;
4925*4882a593Smuzhiyun 	diag->tsensor.temp->ts_brd = rsp->ts_brd;
4926*4882a593Smuzhiyun 
4927*4882a593Smuzhiyun 	if (rsp->ts_brd) {
4928*4882a593Smuzhiyun 		/* tsensor.temp->status is brd_temp status */
4929*4882a593Smuzhiyun 		diag->tsensor.temp->status = rsp->status;
4930*4882a593Smuzhiyun 		if (rsp->status == BFA_STATUS_OK) {
4931*4882a593Smuzhiyun 			diag->tsensor.temp->brd_temp =
4932*4882a593Smuzhiyun 				be16_to_cpu(rsp->brd_temp);
4933*4882a593Smuzhiyun 		} else
4934*4882a593Smuzhiyun 			diag->tsensor.temp->brd_temp = 0;
4935*4882a593Smuzhiyun 	}
4936*4882a593Smuzhiyun 
4937*4882a593Smuzhiyun 	bfa_trc(diag, rsp->status);
4938*4882a593Smuzhiyun 	bfa_trc(diag, rsp->ts_junc);
4939*4882a593Smuzhiyun 	bfa_trc(diag, rsp->temp);
4940*4882a593Smuzhiyun 	bfa_trc(diag, rsp->ts_brd);
4941*4882a593Smuzhiyun 	bfa_trc(diag, rsp->brd_temp);
4942*4882a593Smuzhiyun 
4943*4882a593Smuzhiyun 	/* tsensor status is always good bcos we always have junction temp */
4944*4882a593Smuzhiyun 	diag->tsensor.status = BFA_STATUS_OK;
4945*4882a593Smuzhiyun 	diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4946*4882a593Smuzhiyun 	diag->tsensor.lock = 0;
4947*4882a593Smuzhiyun }
4948*4882a593Smuzhiyun 
4949*4882a593Smuzhiyun /*
4950*4882a593Smuzhiyun  *	LED Test command
4951*4882a593Smuzhiyun  */
4952*4882a593Smuzhiyun static void
diag_ledtest_send(struct bfa_diag_s * diag,struct bfa_diag_ledtest_s * ledtest)4953*4882a593Smuzhiyun diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4954*4882a593Smuzhiyun {
4955*4882a593Smuzhiyun 	struct bfi_diag_ledtest_req_s  *msg;
4956*4882a593Smuzhiyun 
4957*4882a593Smuzhiyun 	msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4958*4882a593Smuzhiyun 	/* build host command */
4959*4882a593Smuzhiyun 	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4960*4882a593Smuzhiyun 			bfa_ioc_portid(diag->ioc));
4961*4882a593Smuzhiyun 
4962*4882a593Smuzhiyun 	/*
4963*4882a593Smuzhiyun 	 * convert the freq from N blinks per 10 sec to
4964*4882a593Smuzhiyun 	 * crossbow ontime value. We do it here because division is need
4965*4882a593Smuzhiyun 	 */
4966*4882a593Smuzhiyun 	if (ledtest->freq)
4967*4882a593Smuzhiyun 		ledtest->freq = 500 / ledtest->freq;
4968*4882a593Smuzhiyun 
4969*4882a593Smuzhiyun 	if (ledtest->freq == 0)
4970*4882a593Smuzhiyun 		ledtest->freq = 1;
4971*4882a593Smuzhiyun 
4972*4882a593Smuzhiyun 	bfa_trc(diag, ledtest->freq);
4973*4882a593Smuzhiyun 	/* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
4974*4882a593Smuzhiyun 	msg->cmd = (u8) ledtest->cmd;
4975*4882a593Smuzhiyun 	msg->color = (u8) ledtest->color;
4976*4882a593Smuzhiyun 	msg->portid = bfa_ioc_portid(diag->ioc);
4977*4882a593Smuzhiyun 	msg->led = ledtest->led;
4978*4882a593Smuzhiyun 	msg->freq = cpu_to_be16(ledtest->freq);
4979*4882a593Smuzhiyun 
4980*4882a593Smuzhiyun 	/* send mbox cmd */
4981*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4982*4882a593Smuzhiyun }
4983*4882a593Smuzhiyun 
4984*4882a593Smuzhiyun static void
diag_ledtest_comp(struct bfa_diag_s * diag,struct bfi_diag_ledtest_rsp_s * msg)4985*4882a593Smuzhiyun diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
4986*4882a593Smuzhiyun {
4987*4882a593Smuzhiyun 	bfa_trc(diag, diag->ledtest.lock);
4988*4882a593Smuzhiyun 	diag->ledtest.lock = BFA_FALSE;
4989*4882a593Smuzhiyun 	/* no bfa_cb_queue is needed because driver is not waiting */
4990*4882a593Smuzhiyun }
4991*4882a593Smuzhiyun 
4992*4882a593Smuzhiyun /*
4993*4882a593Smuzhiyun  * Port beaconing
4994*4882a593Smuzhiyun  */
4995*4882a593Smuzhiyun static void
diag_portbeacon_send(struct bfa_diag_s * diag,bfa_boolean_t beacon,u32 sec)4996*4882a593Smuzhiyun diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
4997*4882a593Smuzhiyun {
4998*4882a593Smuzhiyun 	struct bfi_diag_portbeacon_req_s *msg;
4999*4882a593Smuzhiyun 
5000*4882a593Smuzhiyun 	msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
5001*4882a593Smuzhiyun 	/* build host command */
5002*4882a593Smuzhiyun 	bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
5003*4882a593Smuzhiyun 		bfa_ioc_portid(diag->ioc));
5004*4882a593Smuzhiyun 	msg->beacon = beacon;
5005*4882a593Smuzhiyun 	msg->period = cpu_to_be32(sec);
5006*4882a593Smuzhiyun 	/* send mbox cmd */
5007*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
5008*4882a593Smuzhiyun }
5009*4882a593Smuzhiyun 
5010*4882a593Smuzhiyun static void
diag_portbeacon_comp(struct bfa_diag_s * diag)5011*4882a593Smuzhiyun diag_portbeacon_comp(struct bfa_diag_s *diag)
5012*4882a593Smuzhiyun {
5013*4882a593Smuzhiyun 	bfa_trc(diag, diag->beacon.state);
5014*4882a593Smuzhiyun 	diag->beacon.state = BFA_FALSE;
5015*4882a593Smuzhiyun 	if (diag->cbfn_beacon)
5016*4882a593Smuzhiyun 		diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
5017*4882a593Smuzhiyun }
5018*4882a593Smuzhiyun 
5019*4882a593Smuzhiyun /*
5020*4882a593Smuzhiyun  *	Diag hmbox handler
5021*4882a593Smuzhiyun  */
5022*4882a593Smuzhiyun static void
bfa_diag_intr(void * diagarg,struct bfi_mbmsg_s * msg)5023*4882a593Smuzhiyun bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
5024*4882a593Smuzhiyun {
5025*4882a593Smuzhiyun 	struct bfa_diag_s *diag = diagarg;
5026*4882a593Smuzhiyun 
5027*4882a593Smuzhiyun 	switch (msg->mh.msg_id) {
5028*4882a593Smuzhiyun 	case BFI_DIAG_I2H_PORTBEACON:
5029*4882a593Smuzhiyun 		diag_portbeacon_comp(diag);
5030*4882a593Smuzhiyun 		break;
5031*4882a593Smuzhiyun 	case BFI_DIAG_I2H_FWPING:
5032*4882a593Smuzhiyun 		diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
5033*4882a593Smuzhiyun 		break;
5034*4882a593Smuzhiyun 	case BFI_DIAG_I2H_TEMPSENSOR:
5035*4882a593Smuzhiyun 		diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
5036*4882a593Smuzhiyun 		break;
5037*4882a593Smuzhiyun 	case BFI_DIAG_I2H_LEDTEST:
5038*4882a593Smuzhiyun 		diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
5039*4882a593Smuzhiyun 		break;
5040*4882a593Smuzhiyun 	default:
5041*4882a593Smuzhiyun 		bfa_trc(diag, msg->mh.msg_id);
5042*4882a593Smuzhiyun 		WARN_ON(1);
5043*4882a593Smuzhiyun 	}
5044*4882a593Smuzhiyun }
5045*4882a593Smuzhiyun 
5046*4882a593Smuzhiyun /*
5047*4882a593Smuzhiyun  * Gen RAM Test
5048*4882a593Smuzhiyun  *
5049*4882a593Smuzhiyun  *   @param[in] *diag           - diag data struct
5050*4882a593Smuzhiyun  *   @param[in] *memtest        - mem test params input from upper layer,
5051*4882a593Smuzhiyun  *   @param[in] pattern         - mem test pattern
5052*4882a593Smuzhiyun  *   @param[in] *result         - mem test result
5053*4882a593Smuzhiyun  *   @param[in] cbfn            - mem test callback functioin
5054*4882a593Smuzhiyun  *   @param[in] cbarg           - callback functioin arg
5055*4882a593Smuzhiyun  *
5056*4882a593Smuzhiyun  *   @param[out]
5057*4882a593Smuzhiyun  */
5058*4882a593Smuzhiyun bfa_status_t
bfa_diag_memtest(struct bfa_diag_s * diag,struct bfa_diag_memtest_s * memtest,u32 pattern,struct bfa_diag_memtest_result * result,bfa_cb_diag_t cbfn,void * cbarg)5059*4882a593Smuzhiyun bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
5060*4882a593Smuzhiyun 		u32 pattern, struct bfa_diag_memtest_result *result,
5061*4882a593Smuzhiyun 		bfa_cb_diag_t cbfn, void *cbarg)
5062*4882a593Smuzhiyun {
5063*4882a593Smuzhiyun 	u32	memtest_tov;
5064*4882a593Smuzhiyun 
5065*4882a593Smuzhiyun 	bfa_trc(diag, pattern);
5066*4882a593Smuzhiyun 
5067*4882a593Smuzhiyun 	if (!bfa_ioc_adapter_is_disabled(diag->ioc))
5068*4882a593Smuzhiyun 		return BFA_STATUS_ADAPTER_ENABLED;
5069*4882a593Smuzhiyun 
5070*4882a593Smuzhiyun 	/* check to see if there is another destructive diag cmd running */
5071*4882a593Smuzhiyun 	if (diag->block) {
5072*4882a593Smuzhiyun 		bfa_trc(diag, diag->block);
5073*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
5074*4882a593Smuzhiyun 	} else
5075*4882a593Smuzhiyun 		diag->block = 1;
5076*4882a593Smuzhiyun 
5077*4882a593Smuzhiyun 	diag->result = result;
5078*4882a593Smuzhiyun 	diag->cbfn = cbfn;
5079*4882a593Smuzhiyun 	diag->cbarg = cbarg;
5080*4882a593Smuzhiyun 
5081*4882a593Smuzhiyun 	/* download memtest code and take LPU0 out of reset */
5082*4882a593Smuzhiyun 	bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
5083*4882a593Smuzhiyun 
5084*4882a593Smuzhiyun 	memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
5085*4882a593Smuzhiyun 		       CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
5086*4882a593Smuzhiyun 	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
5087*4882a593Smuzhiyun 			bfa_diag_memtest_done, diag, memtest_tov);
5088*4882a593Smuzhiyun 	diag->timer_active = 1;
5089*4882a593Smuzhiyun 	return BFA_STATUS_OK;
5090*4882a593Smuzhiyun }
5091*4882a593Smuzhiyun 
5092*4882a593Smuzhiyun /*
5093*4882a593Smuzhiyun  * DIAG firmware ping command
5094*4882a593Smuzhiyun  *
5095*4882a593Smuzhiyun  *   @param[in] *diag           - diag data struct
5096*4882a593Smuzhiyun  *   @param[in] cnt             - dma loop count for testing PCIE
5097*4882a593Smuzhiyun  *   @param[in] data            - data pattern to pass in fw
5098*4882a593Smuzhiyun  *   @param[in] *result         - pt to bfa_diag_fwping_result_t data struct
5099*4882a593Smuzhiyun  *   @param[in] cbfn            - callback function
5100*4882a593Smuzhiyun  *   @param[in] *cbarg          - callback functioin arg
5101*4882a593Smuzhiyun  *
5102*4882a593Smuzhiyun  *   @param[out]
5103*4882a593Smuzhiyun  */
5104*4882a593Smuzhiyun bfa_status_t
bfa_diag_fwping(struct bfa_diag_s * diag,u32 cnt,u32 data,struct bfa_diag_results_fwping * result,bfa_cb_diag_t cbfn,void * cbarg)5105*4882a593Smuzhiyun bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
5106*4882a593Smuzhiyun 		struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
5107*4882a593Smuzhiyun 		void *cbarg)
5108*4882a593Smuzhiyun {
5109*4882a593Smuzhiyun 	bfa_trc(diag, cnt);
5110*4882a593Smuzhiyun 	bfa_trc(diag, data);
5111*4882a593Smuzhiyun 
5112*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(diag->ioc))
5113*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
5114*4882a593Smuzhiyun 
5115*4882a593Smuzhiyun 	if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
5116*4882a593Smuzhiyun 	    ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
5117*4882a593Smuzhiyun 		return BFA_STATUS_CMD_NOTSUPP;
5118*4882a593Smuzhiyun 
5119*4882a593Smuzhiyun 	/* check to see if there is another destructive diag cmd running */
5120*4882a593Smuzhiyun 	if (diag->block || diag->fwping.lock) {
5121*4882a593Smuzhiyun 		bfa_trc(diag, diag->block);
5122*4882a593Smuzhiyun 		bfa_trc(diag, diag->fwping.lock);
5123*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
5124*4882a593Smuzhiyun 	}
5125*4882a593Smuzhiyun 
5126*4882a593Smuzhiyun 	/* Initialization */
5127*4882a593Smuzhiyun 	diag->fwping.lock = 1;
5128*4882a593Smuzhiyun 	diag->fwping.cbfn = cbfn;
5129*4882a593Smuzhiyun 	diag->fwping.cbarg = cbarg;
5130*4882a593Smuzhiyun 	diag->fwping.result = result;
5131*4882a593Smuzhiyun 	diag->fwping.data = data;
5132*4882a593Smuzhiyun 	diag->fwping.count = cnt;
5133*4882a593Smuzhiyun 
5134*4882a593Smuzhiyun 	/* Init test results */
5135*4882a593Smuzhiyun 	diag->fwping.result->data = 0;
5136*4882a593Smuzhiyun 	diag->fwping.result->status = BFA_STATUS_OK;
5137*4882a593Smuzhiyun 
5138*4882a593Smuzhiyun 	/* kick off the first ping */
5139*4882a593Smuzhiyun 	diag_fwping_send(diag);
5140*4882a593Smuzhiyun 	return BFA_STATUS_OK;
5141*4882a593Smuzhiyun }
5142*4882a593Smuzhiyun 
5143*4882a593Smuzhiyun /*
5144*4882a593Smuzhiyun  * Read Temperature Sensor
5145*4882a593Smuzhiyun  *
5146*4882a593Smuzhiyun  *   @param[in] *diag           - diag data struct
5147*4882a593Smuzhiyun  *   @param[in] *result         - pt to bfa_diag_temp_t data struct
5148*4882a593Smuzhiyun  *   @param[in] cbfn            - callback function
5149*4882a593Smuzhiyun  *   @param[in] *cbarg          - callback functioin arg
5150*4882a593Smuzhiyun  *
5151*4882a593Smuzhiyun  *   @param[out]
5152*4882a593Smuzhiyun  */
5153*4882a593Smuzhiyun bfa_status_t
bfa_diag_tsensor_query(struct bfa_diag_s * diag,struct bfa_diag_results_tempsensor_s * result,bfa_cb_diag_t cbfn,void * cbarg)5154*4882a593Smuzhiyun bfa_diag_tsensor_query(struct bfa_diag_s *diag,
5155*4882a593Smuzhiyun 		struct bfa_diag_results_tempsensor_s *result,
5156*4882a593Smuzhiyun 		bfa_cb_diag_t cbfn, void *cbarg)
5157*4882a593Smuzhiyun {
5158*4882a593Smuzhiyun 	/* check to see if there is a destructive diag cmd running */
5159*4882a593Smuzhiyun 	if (diag->block || diag->tsensor.lock) {
5160*4882a593Smuzhiyun 		bfa_trc(diag, diag->block);
5161*4882a593Smuzhiyun 		bfa_trc(diag, diag->tsensor.lock);
5162*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
5163*4882a593Smuzhiyun 	}
5164*4882a593Smuzhiyun 
5165*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(diag->ioc))
5166*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
5167*4882a593Smuzhiyun 
5168*4882a593Smuzhiyun 	/* Init diag mod params */
5169*4882a593Smuzhiyun 	diag->tsensor.lock = 1;
5170*4882a593Smuzhiyun 	diag->tsensor.temp = result;
5171*4882a593Smuzhiyun 	diag->tsensor.cbfn = cbfn;
5172*4882a593Smuzhiyun 	diag->tsensor.cbarg = cbarg;
5173*4882a593Smuzhiyun 	diag->tsensor.status = BFA_STATUS_OK;
5174*4882a593Smuzhiyun 
5175*4882a593Smuzhiyun 	/* Send msg to fw */
5176*4882a593Smuzhiyun 	diag_tempsensor_send(diag);
5177*4882a593Smuzhiyun 
5178*4882a593Smuzhiyun 	return BFA_STATUS_OK;
5179*4882a593Smuzhiyun }
5180*4882a593Smuzhiyun 
5181*4882a593Smuzhiyun /*
5182*4882a593Smuzhiyun  * LED Test command
5183*4882a593Smuzhiyun  *
5184*4882a593Smuzhiyun  *   @param[in] *diag           - diag data struct
5185*4882a593Smuzhiyun  *   @param[in] *ledtest        - pt to ledtest data structure
5186*4882a593Smuzhiyun  *
5187*4882a593Smuzhiyun  *   @param[out]
5188*4882a593Smuzhiyun  */
5189*4882a593Smuzhiyun bfa_status_t
bfa_diag_ledtest(struct bfa_diag_s * diag,struct bfa_diag_ledtest_s * ledtest)5190*4882a593Smuzhiyun bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
5191*4882a593Smuzhiyun {
5192*4882a593Smuzhiyun 	bfa_trc(diag, ledtest->cmd);
5193*4882a593Smuzhiyun 
5194*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(diag->ioc))
5195*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
5196*4882a593Smuzhiyun 
5197*4882a593Smuzhiyun 	if (diag->beacon.state)
5198*4882a593Smuzhiyun 		return BFA_STATUS_BEACON_ON;
5199*4882a593Smuzhiyun 
5200*4882a593Smuzhiyun 	if (diag->ledtest.lock)
5201*4882a593Smuzhiyun 		return BFA_STATUS_LEDTEST_OP;
5202*4882a593Smuzhiyun 
5203*4882a593Smuzhiyun 	/* Send msg to fw */
5204*4882a593Smuzhiyun 	diag->ledtest.lock = BFA_TRUE;
5205*4882a593Smuzhiyun 	diag_ledtest_send(diag, ledtest);
5206*4882a593Smuzhiyun 
5207*4882a593Smuzhiyun 	return BFA_STATUS_OK;
5208*4882a593Smuzhiyun }
5209*4882a593Smuzhiyun 
5210*4882a593Smuzhiyun /*
5211*4882a593Smuzhiyun  * Port beaconing command
5212*4882a593Smuzhiyun  *
5213*4882a593Smuzhiyun  *   @param[in] *diag           - diag data struct
5214*4882a593Smuzhiyun  *   @param[in] beacon          - port beaconing 1:ON   0:OFF
5215*4882a593Smuzhiyun  *   @param[in] link_e2e_beacon - link beaconing 1:ON   0:OFF
5216*4882a593Smuzhiyun  *   @param[in] sec             - beaconing duration in seconds
5217*4882a593Smuzhiyun  *
5218*4882a593Smuzhiyun  *   @param[out]
5219*4882a593Smuzhiyun  */
5220*4882a593Smuzhiyun bfa_status_t
bfa_diag_beacon_port(struct bfa_diag_s * diag,bfa_boolean_t beacon,bfa_boolean_t link_e2e_beacon,uint32_t sec)5221*4882a593Smuzhiyun bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
5222*4882a593Smuzhiyun 		bfa_boolean_t link_e2e_beacon, uint32_t sec)
5223*4882a593Smuzhiyun {
5224*4882a593Smuzhiyun 	bfa_trc(diag, beacon);
5225*4882a593Smuzhiyun 	bfa_trc(diag, link_e2e_beacon);
5226*4882a593Smuzhiyun 	bfa_trc(diag, sec);
5227*4882a593Smuzhiyun 
5228*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(diag->ioc))
5229*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
5230*4882a593Smuzhiyun 
5231*4882a593Smuzhiyun 	if (diag->ledtest.lock)
5232*4882a593Smuzhiyun 		return BFA_STATUS_LEDTEST_OP;
5233*4882a593Smuzhiyun 
5234*4882a593Smuzhiyun 	if (diag->beacon.state && beacon)       /* beacon alread on */
5235*4882a593Smuzhiyun 		return BFA_STATUS_BEACON_ON;
5236*4882a593Smuzhiyun 
5237*4882a593Smuzhiyun 	diag->beacon.state	= beacon;
5238*4882a593Smuzhiyun 	diag->beacon.link_e2e	= link_e2e_beacon;
5239*4882a593Smuzhiyun 	if (diag->cbfn_beacon)
5240*4882a593Smuzhiyun 		diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
5241*4882a593Smuzhiyun 
5242*4882a593Smuzhiyun 	/* Send msg to fw */
5243*4882a593Smuzhiyun 	diag_portbeacon_send(diag, beacon, sec);
5244*4882a593Smuzhiyun 
5245*4882a593Smuzhiyun 	return BFA_STATUS_OK;
5246*4882a593Smuzhiyun }
5247*4882a593Smuzhiyun 
5248*4882a593Smuzhiyun /*
5249*4882a593Smuzhiyun  * Return DMA memory needed by diag module.
5250*4882a593Smuzhiyun  */
5251*4882a593Smuzhiyun u32
bfa_diag_meminfo(void)5252*4882a593Smuzhiyun bfa_diag_meminfo(void)
5253*4882a593Smuzhiyun {
5254*4882a593Smuzhiyun 	return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5255*4882a593Smuzhiyun }
5256*4882a593Smuzhiyun 
5257*4882a593Smuzhiyun /*
5258*4882a593Smuzhiyun  *	Attach virtual and physical memory for Diag.
5259*4882a593Smuzhiyun  */
5260*4882a593Smuzhiyun void
bfa_diag_attach(struct bfa_diag_s * diag,struct bfa_ioc_s * ioc,void * dev,bfa_cb_diag_beacon_t cbfn_beacon,struct bfa_trc_mod_s * trcmod)5261*4882a593Smuzhiyun bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5262*4882a593Smuzhiyun 	bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5263*4882a593Smuzhiyun {
5264*4882a593Smuzhiyun 	diag->dev = dev;
5265*4882a593Smuzhiyun 	diag->ioc = ioc;
5266*4882a593Smuzhiyun 	diag->trcmod = trcmod;
5267*4882a593Smuzhiyun 
5268*4882a593Smuzhiyun 	diag->block = 0;
5269*4882a593Smuzhiyun 	diag->cbfn = NULL;
5270*4882a593Smuzhiyun 	diag->cbarg = NULL;
5271*4882a593Smuzhiyun 	diag->result = NULL;
5272*4882a593Smuzhiyun 	diag->cbfn_beacon = cbfn_beacon;
5273*4882a593Smuzhiyun 
5274*4882a593Smuzhiyun 	bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5275*4882a593Smuzhiyun 	bfa_q_qe_init(&diag->ioc_notify);
5276*4882a593Smuzhiyun 	bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5277*4882a593Smuzhiyun 	list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5278*4882a593Smuzhiyun }
5279*4882a593Smuzhiyun 
5280*4882a593Smuzhiyun void
bfa_diag_memclaim(struct bfa_diag_s * diag,u8 * dm_kva,u64 dm_pa)5281*4882a593Smuzhiyun bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5282*4882a593Smuzhiyun {
5283*4882a593Smuzhiyun 	diag->fwping.dbuf_kva = dm_kva;
5284*4882a593Smuzhiyun 	diag->fwping.dbuf_pa = dm_pa;
5285*4882a593Smuzhiyun 	memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5286*4882a593Smuzhiyun }
5287*4882a593Smuzhiyun 
5288*4882a593Smuzhiyun /*
5289*4882a593Smuzhiyun  *	PHY module specific
5290*4882a593Smuzhiyun  */
5291*4882a593Smuzhiyun #define BFA_PHY_DMA_BUF_SZ	0x02000         /* 8k dma buffer */
5292*4882a593Smuzhiyun #define BFA_PHY_LOCK_STATUS	0x018878        /* phy semaphore status reg */
5293*4882a593Smuzhiyun 
5294*4882a593Smuzhiyun static void
bfa_phy_ntoh32(u32 * obuf,u32 * ibuf,int sz)5295*4882a593Smuzhiyun bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5296*4882a593Smuzhiyun {
5297*4882a593Smuzhiyun 	int i, m = sz >> 2;
5298*4882a593Smuzhiyun 
5299*4882a593Smuzhiyun 	for (i = 0; i < m; i++)
5300*4882a593Smuzhiyun 		obuf[i] = be32_to_cpu(ibuf[i]);
5301*4882a593Smuzhiyun }
5302*4882a593Smuzhiyun 
5303*4882a593Smuzhiyun static bfa_boolean_t
bfa_phy_present(struct bfa_phy_s * phy)5304*4882a593Smuzhiyun bfa_phy_present(struct bfa_phy_s *phy)
5305*4882a593Smuzhiyun {
5306*4882a593Smuzhiyun 	return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5307*4882a593Smuzhiyun }
5308*4882a593Smuzhiyun 
5309*4882a593Smuzhiyun static void
bfa_phy_notify(void * cbarg,enum bfa_ioc_event_e event)5310*4882a593Smuzhiyun bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5311*4882a593Smuzhiyun {
5312*4882a593Smuzhiyun 	struct bfa_phy_s *phy = cbarg;
5313*4882a593Smuzhiyun 
5314*4882a593Smuzhiyun 	bfa_trc(phy, event);
5315*4882a593Smuzhiyun 
5316*4882a593Smuzhiyun 	switch (event) {
5317*4882a593Smuzhiyun 	case BFA_IOC_E_DISABLED:
5318*4882a593Smuzhiyun 	case BFA_IOC_E_FAILED:
5319*4882a593Smuzhiyun 		if (phy->op_busy) {
5320*4882a593Smuzhiyun 			phy->status = BFA_STATUS_IOC_FAILURE;
5321*4882a593Smuzhiyun 			phy->cbfn(phy->cbarg, phy->status);
5322*4882a593Smuzhiyun 			phy->op_busy = 0;
5323*4882a593Smuzhiyun 		}
5324*4882a593Smuzhiyun 		break;
5325*4882a593Smuzhiyun 
5326*4882a593Smuzhiyun 	default:
5327*4882a593Smuzhiyun 		break;
5328*4882a593Smuzhiyun 	}
5329*4882a593Smuzhiyun }
5330*4882a593Smuzhiyun 
5331*4882a593Smuzhiyun /*
5332*4882a593Smuzhiyun  * Send phy attribute query request.
5333*4882a593Smuzhiyun  *
5334*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
5335*4882a593Smuzhiyun  */
5336*4882a593Smuzhiyun static void
bfa_phy_query_send(void * cbarg)5337*4882a593Smuzhiyun bfa_phy_query_send(void *cbarg)
5338*4882a593Smuzhiyun {
5339*4882a593Smuzhiyun 	struct bfa_phy_s *phy = cbarg;
5340*4882a593Smuzhiyun 	struct bfi_phy_query_req_s *msg =
5341*4882a593Smuzhiyun 			(struct bfi_phy_query_req_s *) phy->mb.msg;
5342*4882a593Smuzhiyun 
5343*4882a593Smuzhiyun 	msg->instance = phy->instance;
5344*4882a593Smuzhiyun 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5345*4882a593Smuzhiyun 		bfa_ioc_portid(phy->ioc));
5346*4882a593Smuzhiyun 	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5347*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5348*4882a593Smuzhiyun }
5349*4882a593Smuzhiyun 
5350*4882a593Smuzhiyun /*
5351*4882a593Smuzhiyun  * Send phy write request.
5352*4882a593Smuzhiyun  *
5353*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
5354*4882a593Smuzhiyun  */
5355*4882a593Smuzhiyun static void
bfa_phy_write_send(void * cbarg)5356*4882a593Smuzhiyun bfa_phy_write_send(void *cbarg)
5357*4882a593Smuzhiyun {
5358*4882a593Smuzhiyun 	struct bfa_phy_s *phy = cbarg;
5359*4882a593Smuzhiyun 	struct bfi_phy_write_req_s *msg =
5360*4882a593Smuzhiyun 			(struct bfi_phy_write_req_s *) phy->mb.msg;
5361*4882a593Smuzhiyun 	u32	len;
5362*4882a593Smuzhiyun 	u16	*buf, *dbuf;
5363*4882a593Smuzhiyun 	int	i, sz;
5364*4882a593Smuzhiyun 
5365*4882a593Smuzhiyun 	msg->instance = phy->instance;
5366*4882a593Smuzhiyun 	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5367*4882a593Smuzhiyun 	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5368*4882a593Smuzhiyun 			phy->residue : BFA_PHY_DMA_BUF_SZ;
5369*4882a593Smuzhiyun 	msg->length = cpu_to_be32(len);
5370*4882a593Smuzhiyun 
5371*4882a593Smuzhiyun 	/* indicate if it's the last msg of the whole write operation */
5372*4882a593Smuzhiyun 	msg->last = (len == phy->residue) ? 1 : 0;
5373*4882a593Smuzhiyun 
5374*4882a593Smuzhiyun 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5375*4882a593Smuzhiyun 		bfa_ioc_portid(phy->ioc));
5376*4882a593Smuzhiyun 	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5377*4882a593Smuzhiyun 
5378*4882a593Smuzhiyun 	buf = (u16 *) (phy->ubuf + phy->offset);
5379*4882a593Smuzhiyun 	dbuf = (u16 *)phy->dbuf_kva;
5380*4882a593Smuzhiyun 	sz = len >> 1;
5381*4882a593Smuzhiyun 	for (i = 0; i < sz; i++)
5382*4882a593Smuzhiyun 		buf[i] = cpu_to_be16(dbuf[i]);
5383*4882a593Smuzhiyun 
5384*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5385*4882a593Smuzhiyun 
5386*4882a593Smuzhiyun 	phy->residue -= len;
5387*4882a593Smuzhiyun 	phy->offset += len;
5388*4882a593Smuzhiyun }
5389*4882a593Smuzhiyun 
5390*4882a593Smuzhiyun /*
5391*4882a593Smuzhiyun  * Send phy read request.
5392*4882a593Smuzhiyun  *
5393*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
5394*4882a593Smuzhiyun  */
5395*4882a593Smuzhiyun static void
bfa_phy_read_send(void * cbarg)5396*4882a593Smuzhiyun bfa_phy_read_send(void *cbarg)
5397*4882a593Smuzhiyun {
5398*4882a593Smuzhiyun 	struct bfa_phy_s *phy = cbarg;
5399*4882a593Smuzhiyun 	struct bfi_phy_read_req_s *msg =
5400*4882a593Smuzhiyun 			(struct bfi_phy_read_req_s *) phy->mb.msg;
5401*4882a593Smuzhiyun 	u32	len;
5402*4882a593Smuzhiyun 
5403*4882a593Smuzhiyun 	msg->instance = phy->instance;
5404*4882a593Smuzhiyun 	msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5405*4882a593Smuzhiyun 	len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5406*4882a593Smuzhiyun 			phy->residue : BFA_PHY_DMA_BUF_SZ;
5407*4882a593Smuzhiyun 	msg->length = cpu_to_be32(len);
5408*4882a593Smuzhiyun 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5409*4882a593Smuzhiyun 		bfa_ioc_portid(phy->ioc));
5410*4882a593Smuzhiyun 	bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5411*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5412*4882a593Smuzhiyun }
5413*4882a593Smuzhiyun 
5414*4882a593Smuzhiyun /*
5415*4882a593Smuzhiyun  * Send phy stats request.
5416*4882a593Smuzhiyun  *
5417*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
5418*4882a593Smuzhiyun  */
5419*4882a593Smuzhiyun static void
bfa_phy_stats_send(void * cbarg)5420*4882a593Smuzhiyun bfa_phy_stats_send(void *cbarg)
5421*4882a593Smuzhiyun {
5422*4882a593Smuzhiyun 	struct bfa_phy_s *phy = cbarg;
5423*4882a593Smuzhiyun 	struct bfi_phy_stats_req_s *msg =
5424*4882a593Smuzhiyun 			(struct bfi_phy_stats_req_s *) phy->mb.msg;
5425*4882a593Smuzhiyun 
5426*4882a593Smuzhiyun 	msg->instance = phy->instance;
5427*4882a593Smuzhiyun 	bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5428*4882a593Smuzhiyun 		bfa_ioc_portid(phy->ioc));
5429*4882a593Smuzhiyun 	bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5430*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5431*4882a593Smuzhiyun }
5432*4882a593Smuzhiyun 
5433*4882a593Smuzhiyun /*
5434*4882a593Smuzhiyun  * Flash memory info API.
5435*4882a593Smuzhiyun  *
5436*4882a593Smuzhiyun  * @param[in] mincfg - minimal cfg variable
5437*4882a593Smuzhiyun  */
5438*4882a593Smuzhiyun u32
bfa_phy_meminfo(bfa_boolean_t mincfg)5439*4882a593Smuzhiyun bfa_phy_meminfo(bfa_boolean_t mincfg)
5440*4882a593Smuzhiyun {
5441*4882a593Smuzhiyun 	/* min driver doesn't need phy */
5442*4882a593Smuzhiyun 	if (mincfg)
5443*4882a593Smuzhiyun 		return 0;
5444*4882a593Smuzhiyun 
5445*4882a593Smuzhiyun 	return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5446*4882a593Smuzhiyun }
5447*4882a593Smuzhiyun 
5448*4882a593Smuzhiyun /*
5449*4882a593Smuzhiyun  * Flash attach API.
5450*4882a593Smuzhiyun  *
5451*4882a593Smuzhiyun  * @param[in] phy - phy structure
5452*4882a593Smuzhiyun  * @param[in] ioc  - ioc structure
5453*4882a593Smuzhiyun  * @param[in] dev  - device structure
5454*4882a593Smuzhiyun  * @param[in] trcmod - trace module
5455*4882a593Smuzhiyun  * @param[in] logmod - log module
5456*4882a593Smuzhiyun  */
5457*4882a593Smuzhiyun void
bfa_phy_attach(struct bfa_phy_s * phy,struct bfa_ioc_s * ioc,void * dev,struct bfa_trc_mod_s * trcmod,bfa_boolean_t mincfg)5458*4882a593Smuzhiyun bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5459*4882a593Smuzhiyun 		struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5460*4882a593Smuzhiyun {
5461*4882a593Smuzhiyun 	phy->ioc = ioc;
5462*4882a593Smuzhiyun 	phy->trcmod = trcmod;
5463*4882a593Smuzhiyun 	phy->cbfn = NULL;
5464*4882a593Smuzhiyun 	phy->cbarg = NULL;
5465*4882a593Smuzhiyun 	phy->op_busy = 0;
5466*4882a593Smuzhiyun 
5467*4882a593Smuzhiyun 	bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5468*4882a593Smuzhiyun 	bfa_q_qe_init(&phy->ioc_notify);
5469*4882a593Smuzhiyun 	bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5470*4882a593Smuzhiyun 	list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5471*4882a593Smuzhiyun 
5472*4882a593Smuzhiyun 	/* min driver doesn't need phy */
5473*4882a593Smuzhiyun 	if (mincfg) {
5474*4882a593Smuzhiyun 		phy->dbuf_kva = NULL;
5475*4882a593Smuzhiyun 		phy->dbuf_pa = 0;
5476*4882a593Smuzhiyun 	}
5477*4882a593Smuzhiyun }
5478*4882a593Smuzhiyun 
5479*4882a593Smuzhiyun /*
5480*4882a593Smuzhiyun  * Claim memory for phy
5481*4882a593Smuzhiyun  *
5482*4882a593Smuzhiyun  * @param[in] phy - phy structure
5483*4882a593Smuzhiyun  * @param[in] dm_kva - pointer to virtual memory address
5484*4882a593Smuzhiyun  * @param[in] dm_pa - physical memory address
5485*4882a593Smuzhiyun  * @param[in] mincfg - minimal cfg variable
5486*4882a593Smuzhiyun  */
5487*4882a593Smuzhiyun void
bfa_phy_memclaim(struct bfa_phy_s * phy,u8 * dm_kva,u64 dm_pa,bfa_boolean_t mincfg)5488*4882a593Smuzhiyun bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5489*4882a593Smuzhiyun 		bfa_boolean_t mincfg)
5490*4882a593Smuzhiyun {
5491*4882a593Smuzhiyun 	if (mincfg)
5492*4882a593Smuzhiyun 		return;
5493*4882a593Smuzhiyun 
5494*4882a593Smuzhiyun 	phy->dbuf_kva = dm_kva;
5495*4882a593Smuzhiyun 	phy->dbuf_pa = dm_pa;
5496*4882a593Smuzhiyun 	memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5497*4882a593Smuzhiyun 	dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5498*4882a593Smuzhiyun 	dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5499*4882a593Smuzhiyun }
5500*4882a593Smuzhiyun 
5501*4882a593Smuzhiyun bfa_boolean_t
bfa_phy_busy(struct bfa_ioc_s * ioc)5502*4882a593Smuzhiyun bfa_phy_busy(struct bfa_ioc_s *ioc)
5503*4882a593Smuzhiyun {
5504*4882a593Smuzhiyun 	void __iomem	*rb;
5505*4882a593Smuzhiyun 
5506*4882a593Smuzhiyun 	rb = bfa_ioc_bar0(ioc);
5507*4882a593Smuzhiyun 	return readl(rb + BFA_PHY_LOCK_STATUS);
5508*4882a593Smuzhiyun }
5509*4882a593Smuzhiyun 
5510*4882a593Smuzhiyun /*
5511*4882a593Smuzhiyun  * Get phy attribute.
5512*4882a593Smuzhiyun  *
5513*4882a593Smuzhiyun  * @param[in] phy - phy structure
5514*4882a593Smuzhiyun  * @param[in] attr - phy attribute structure
5515*4882a593Smuzhiyun  * @param[in] cbfn - callback function
5516*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
5517*4882a593Smuzhiyun  *
5518*4882a593Smuzhiyun  * Return status.
5519*4882a593Smuzhiyun  */
5520*4882a593Smuzhiyun bfa_status_t
bfa_phy_get_attr(struct bfa_phy_s * phy,u8 instance,struct bfa_phy_attr_s * attr,bfa_cb_phy_t cbfn,void * cbarg)5521*4882a593Smuzhiyun bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5522*4882a593Smuzhiyun 		struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5523*4882a593Smuzhiyun {
5524*4882a593Smuzhiyun 	bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5525*4882a593Smuzhiyun 	bfa_trc(phy, instance);
5526*4882a593Smuzhiyun 
5527*4882a593Smuzhiyun 	if (!bfa_phy_present(phy))
5528*4882a593Smuzhiyun 		return BFA_STATUS_PHY_NOT_PRESENT;
5529*4882a593Smuzhiyun 
5530*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(phy->ioc))
5531*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
5532*4882a593Smuzhiyun 
5533*4882a593Smuzhiyun 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5534*4882a593Smuzhiyun 		bfa_trc(phy, phy->op_busy);
5535*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
5536*4882a593Smuzhiyun 	}
5537*4882a593Smuzhiyun 
5538*4882a593Smuzhiyun 	phy->op_busy = 1;
5539*4882a593Smuzhiyun 	phy->cbfn = cbfn;
5540*4882a593Smuzhiyun 	phy->cbarg = cbarg;
5541*4882a593Smuzhiyun 	phy->instance = instance;
5542*4882a593Smuzhiyun 	phy->ubuf = (uint8_t *) attr;
5543*4882a593Smuzhiyun 	bfa_phy_query_send(phy);
5544*4882a593Smuzhiyun 
5545*4882a593Smuzhiyun 	return BFA_STATUS_OK;
5546*4882a593Smuzhiyun }
5547*4882a593Smuzhiyun 
5548*4882a593Smuzhiyun /*
5549*4882a593Smuzhiyun  * Get phy stats.
5550*4882a593Smuzhiyun  *
5551*4882a593Smuzhiyun  * @param[in] phy - phy structure
5552*4882a593Smuzhiyun  * @param[in] instance - phy image instance
5553*4882a593Smuzhiyun  * @param[in] stats - pointer to phy stats
5554*4882a593Smuzhiyun  * @param[in] cbfn - callback function
5555*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
5556*4882a593Smuzhiyun  *
5557*4882a593Smuzhiyun  * Return status.
5558*4882a593Smuzhiyun  */
5559*4882a593Smuzhiyun bfa_status_t
bfa_phy_get_stats(struct bfa_phy_s * phy,u8 instance,struct bfa_phy_stats_s * stats,bfa_cb_phy_t cbfn,void * cbarg)5560*4882a593Smuzhiyun bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5561*4882a593Smuzhiyun 		struct bfa_phy_stats_s *stats,
5562*4882a593Smuzhiyun 		bfa_cb_phy_t cbfn, void *cbarg)
5563*4882a593Smuzhiyun {
5564*4882a593Smuzhiyun 	bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5565*4882a593Smuzhiyun 	bfa_trc(phy, instance);
5566*4882a593Smuzhiyun 
5567*4882a593Smuzhiyun 	if (!bfa_phy_present(phy))
5568*4882a593Smuzhiyun 		return BFA_STATUS_PHY_NOT_PRESENT;
5569*4882a593Smuzhiyun 
5570*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(phy->ioc))
5571*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
5572*4882a593Smuzhiyun 
5573*4882a593Smuzhiyun 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5574*4882a593Smuzhiyun 		bfa_trc(phy, phy->op_busy);
5575*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
5576*4882a593Smuzhiyun 	}
5577*4882a593Smuzhiyun 
5578*4882a593Smuzhiyun 	phy->op_busy = 1;
5579*4882a593Smuzhiyun 	phy->cbfn = cbfn;
5580*4882a593Smuzhiyun 	phy->cbarg = cbarg;
5581*4882a593Smuzhiyun 	phy->instance = instance;
5582*4882a593Smuzhiyun 	phy->ubuf = (u8 *) stats;
5583*4882a593Smuzhiyun 	bfa_phy_stats_send(phy);
5584*4882a593Smuzhiyun 
5585*4882a593Smuzhiyun 	return BFA_STATUS_OK;
5586*4882a593Smuzhiyun }
5587*4882a593Smuzhiyun 
5588*4882a593Smuzhiyun /*
5589*4882a593Smuzhiyun  * Update phy image.
5590*4882a593Smuzhiyun  *
5591*4882a593Smuzhiyun  * @param[in] phy - phy structure
5592*4882a593Smuzhiyun  * @param[in] instance - phy image instance
5593*4882a593Smuzhiyun  * @param[in] buf - update data buffer
5594*4882a593Smuzhiyun  * @param[in] len - data buffer length
5595*4882a593Smuzhiyun  * @param[in] offset - offset relative to starting address
5596*4882a593Smuzhiyun  * @param[in] cbfn - callback function
5597*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
5598*4882a593Smuzhiyun  *
5599*4882a593Smuzhiyun  * Return status.
5600*4882a593Smuzhiyun  */
5601*4882a593Smuzhiyun bfa_status_t
bfa_phy_update(struct bfa_phy_s * phy,u8 instance,void * buf,u32 len,u32 offset,bfa_cb_phy_t cbfn,void * cbarg)5602*4882a593Smuzhiyun bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5603*4882a593Smuzhiyun 		void *buf, u32 len, u32 offset,
5604*4882a593Smuzhiyun 		bfa_cb_phy_t cbfn, void *cbarg)
5605*4882a593Smuzhiyun {
5606*4882a593Smuzhiyun 	bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5607*4882a593Smuzhiyun 	bfa_trc(phy, instance);
5608*4882a593Smuzhiyun 	bfa_trc(phy, len);
5609*4882a593Smuzhiyun 	bfa_trc(phy, offset);
5610*4882a593Smuzhiyun 
5611*4882a593Smuzhiyun 	if (!bfa_phy_present(phy))
5612*4882a593Smuzhiyun 		return BFA_STATUS_PHY_NOT_PRESENT;
5613*4882a593Smuzhiyun 
5614*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(phy->ioc))
5615*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
5616*4882a593Smuzhiyun 
5617*4882a593Smuzhiyun 	/* 'len' must be in word (4-byte) boundary */
5618*4882a593Smuzhiyun 	if (!len || (len & 0x03))
5619*4882a593Smuzhiyun 		return BFA_STATUS_FAILED;
5620*4882a593Smuzhiyun 
5621*4882a593Smuzhiyun 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5622*4882a593Smuzhiyun 		bfa_trc(phy, phy->op_busy);
5623*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
5624*4882a593Smuzhiyun 	}
5625*4882a593Smuzhiyun 
5626*4882a593Smuzhiyun 	phy->op_busy = 1;
5627*4882a593Smuzhiyun 	phy->cbfn = cbfn;
5628*4882a593Smuzhiyun 	phy->cbarg = cbarg;
5629*4882a593Smuzhiyun 	phy->instance = instance;
5630*4882a593Smuzhiyun 	phy->residue = len;
5631*4882a593Smuzhiyun 	phy->offset = 0;
5632*4882a593Smuzhiyun 	phy->addr_off = offset;
5633*4882a593Smuzhiyun 	phy->ubuf = buf;
5634*4882a593Smuzhiyun 
5635*4882a593Smuzhiyun 	bfa_phy_write_send(phy);
5636*4882a593Smuzhiyun 	return BFA_STATUS_OK;
5637*4882a593Smuzhiyun }
5638*4882a593Smuzhiyun 
5639*4882a593Smuzhiyun /*
5640*4882a593Smuzhiyun  * Read phy image.
5641*4882a593Smuzhiyun  *
5642*4882a593Smuzhiyun  * @param[in] phy - phy structure
5643*4882a593Smuzhiyun  * @param[in] instance - phy image instance
5644*4882a593Smuzhiyun  * @param[in] buf - read data buffer
5645*4882a593Smuzhiyun  * @param[in] len - data buffer length
5646*4882a593Smuzhiyun  * @param[in] offset - offset relative to starting address
5647*4882a593Smuzhiyun  * @param[in] cbfn - callback function
5648*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
5649*4882a593Smuzhiyun  *
5650*4882a593Smuzhiyun  * Return status.
5651*4882a593Smuzhiyun  */
5652*4882a593Smuzhiyun bfa_status_t
bfa_phy_read(struct bfa_phy_s * phy,u8 instance,void * buf,u32 len,u32 offset,bfa_cb_phy_t cbfn,void * cbarg)5653*4882a593Smuzhiyun bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5654*4882a593Smuzhiyun 		void *buf, u32 len, u32 offset,
5655*4882a593Smuzhiyun 		bfa_cb_phy_t cbfn, void *cbarg)
5656*4882a593Smuzhiyun {
5657*4882a593Smuzhiyun 	bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5658*4882a593Smuzhiyun 	bfa_trc(phy, instance);
5659*4882a593Smuzhiyun 	bfa_trc(phy, len);
5660*4882a593Smuzhiyun 	bfa_trc(phy, offset);
5661*4882a593Smuzhiyun 
5662*4882a593Smuzhiyun 	if (!bfa_phy_present(phy))
5663*4882a593Smuzhiyun 		return BFA_STATUS_PHY_NOT_PRESENT;
5664*4882a593Smuzhiyun 
5665*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(phy->ioc))
5666*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
5667*4882a593Smuzhiyun 
5668*4882a593Smuzhiyun 	/* 'len' must be in word (4-byte) boundary */
5669*4882a593Smuzhiyun 	if (!len || (len & 0x03))
5670*4882a593Smuzhiyun 		return BFA_STATUS_FAILED;
5671*4882a593Smuzhiyun 
5672*4882a593Smuzhiyun 	if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5673*4882a593Smuzhiyun 		bfa_trc(phy, phy->op_busy);
5674*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
5675*4882a593Smuzhiyun 	}
5676*4882a593Smuzhiyun 
5677*4882a593Smuzhiyun 	phy->op_busy = 1;
5678*4882a593Smuzhiyun 	phy->cbfn = cbfn;
5679*4882a593Smuzhiyun 	phy->cbarg = cbarg;
5680*4882a593Smuzhiyun 	phy->instance = instance;
5681*4882a593Smuzhiyun 	phy->residue = len;
5682*4882a593Smuzhiyun 	phy->offset = 0;
5683*4882a593Smuzhiyun 	phy->addr_off = offset;
5684*4882a593Smuzhiyun 	phy->ubuf = buf;
5685*4882a593Smuzhiyun 	bfa_phy_read_send(phy);
5686*4882a593Smuzhiyun 
5687*4882a593Smuzhiyun 	return BFA_STATUS_OK;
5688*4882a593Smuzhiyun }
5689*4882a593Smuzhiyun 
5690*4882a593Smuzhiyun /*
5691*4882a593Smuzhiyun  * Process phy response messages upon receiving interrupts.
5692*4882a593Smuzhiyun  *
5693*4882a593Smuzhiyun  * @param[in] phyarg - phy structure
5694*4882a593Smuzhiyun  * @param[in] msg - message structure
5695*4882a593Smuzhiyun  */
5696*4882a593Smuzhiyun void
bfa_phy_intr(void * phyarg,struct bfi_mbmsg_s * msg)5697*4882a593Smuzhiyun bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5698*4882a593Smuzhiyun {
5699*4882a593Smuzhiyun 	struct bfa_phy_s *phy = phyarg;
5700*4882a593Smuzhiyun 	u32	status;
5701*4882a593Smuzhiyun 
5702*4882a593Smuzhiyun 	union {
5703*4882a593Smuzhiyun 		struct bfi_phy_query_rsp_s *query;
5704*4882a593Smuzhiyun 		struct bfi_phy_stats_rsp_s *stats;
5705*4882a593Smuzhiyun 		struct bfi_phy_write_rsp_s *write;
5706*4882a593Smuzhiyun 		struct bfi_phy_read_rsp_s *read;
5707*4882a593Smuzhiyun 		struct bfi_mbmsg_s   *msg;
5708*4882a593Smuzhiyun 	} m;
5709*4882a593Smuzhiyun 
5710*4882a593Smuzhiyun 	m.msg = msg;
5711*4882a593Smuzhiyun 	bfa_trc(phy, msg->mh.msg_id);
5712*4882a593Smuzhiyun 
5713*4882a593Smuzhiyun 	if (!phy->op_busy) {
5714*4882a593Smuzhiyun 		/* receiving response after ioc failure */
5715*4882a593Smuzhiyun 		bfa_trc(phy, 0x9999);
5716*4882a593Smuzhiyun 		return;
5717*4882a593Smuzhiyun 	}
5718*4882a593Smuzhiyun 
5719*4882a593Smuzhiyun 	switch (msg->mh.msg_id) {
5720*4882a593Smuzhiyun 	case BFI_PHY_I2H_QUERY_RSP:
5721*4882a593Smuzhiyun 		status = be32_to_cpu(m.query->status);
5722*4882a593Smuzhiyun 		bfa_trc(phy, status);
5723*4882a593Smuzhiyun 
5724*4882a593Smuzhiyun 		if (status == BFA_STATUS_OK) {
5725*4882a593Smuzhiyun 			struct bfa_phy_attr_s *attr =
5726*4882a593Smuzhiyun 				(struct bfa_phy_attr_s *) phy->ubuf;
5727*4882a593Smuzhiyun 			bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5728*4882a593Smuzhiyun 					sizeof(struct bfa_phy_attr_s));
5729*4882a593Smuzhiyun 			bfa_trc(phy, attr->status);
5730*4882a593Smuzhiyun 			bfa_trc(phy, attr->length);
5731*4882a593Smuzhiyun 		}
5732*4882a593Smuzhiyun 
5733*4882a593Smuzhiyun 		phy->status = status;
5734*4882a593Smuzhiyun 		phy->op_busy = 0;
5735*4882a593Smuzhiyun 		if (phy->cbfn)
5736*4882a593Smuzhiyun 			phy->cbfn(phy->cbarg, phy->status);
5737*4882a593Smuzhiyun 		break;
5738*4882a593Smuzhiyun 	case BFI_PHY_I2H_STATS_RSP:
5739*4882a593Smuzhiyun 		status = be32_to_cpu(m.stats->status);
5740*4882a593Smuzhiyun 		bfa_trc(phy, status);
5741*4882a593Smuzhiyun 
5742*4882a593Smuzhiyun 		if (status == BFA_STATUS_OK) {
5743*4882a593Smuzhiyun 			struct bfa_phy_stats_s *stats =
5744*4882a593Smuzhiyun 				(struct bfa_phy_stats_s *) phy->ubuf;
5745*4882a593Smuzhiyun 			bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5746*4882a593Smuzhiyun 				sizeof(struct bfa_phy_stats_s));
5747*4882a593Smuzhiyun 			bfa_trc(phy, stats->status);
5748*4882a593Smuzhiyun 		}
5749*4882a593Smuzhiyun 
5750*4882a593Smuzhiyun 		phy->status = status;
5751*4882a593Smuzhiyun 		phy->op_busy = 0;
5752*4882a593Smuzhiyun 		if (phy->cbfn)
5753*4882a593Smuzhiyun 			phy->cbfn(phy->cbarg, phy->status);
5754*4882a593Smuzhiyun 		break;
5755*4882a593Smuzhiyun 	case BFI_PHY_I2H_WRITE_RSP:
5756*4882a593Smuzhiyun 		status = be32_to_cpu(m.write->status);
5757*4882a593Smuzhiyun 		bfa_trc(phy, status);
5758*4882a593Smuzhiyun 
5759*4882a593Smuzhiyun 		if (status != BFA_STATUS_OK || phy->residue == 0) {
5760*4882a593Smuzhiyun 			phy->status = status;
5761*4882a593Smuzhiyun 			phy->op_busy = 0;
5762*4882a593Smuzhiyun 			if (phy->cbfn)
5763*4882a593Smuzhiyun 				phy->cbfn(phy->cbarg, phy->status);
5764*4882a593Smuzhiyun 		} else {
5765*4882a593Smuzhiyun 			bfa_trc(phy, phy->offset);
5766*4882a593Smuzhiyun 			bfa_phy_write_send(phy);
5767*4882a593Smuzhiyun 		}
5768*4882a593Smuzhiyun 		break;
5769*4882a593Smuzhiyun 	case BFI_PHY_I2H_READ_RSP:
5770*4882a593Smuzhiyun 		status = be32_to_cpu(m.read->status);
5771*4882a593Smuzhiyun 		bfa_trc(phy, status);
5772*4882a593Smuzhiyun 
5773*4882a593Smuzhiyun 		if (status != BFA_STATUS_OK) {
5774*4882a593Smuzhiyun 			phy->status = status;
5775*4882a593Smuzhiyun 			phy->op_busy = 0;
5776*4882a593Smuzhiyun 			if (phy->cbfn)
5777*4882a593Smuzhiyun 				phy->cbfn(phy->cbarg, phy->status);
5778*4882a593Smuzhiyun 		} else {
5779*4882a593Smuzhiyun 			u32 len = be32_to_cpu(m.read->length);
5780*4882a593Smuzhiyun 			u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5781*4882a593Smuzhiyun 			u16 *dbuf = (u16 *)phy->dbuf_kva;
5782*4882a593Smuzhiyun 			int i, sz = len >> 1;
5783*4882a593Smuzhiyun 
5784*4882a593Smuzhiyun 			bfa_trc(phy, phy->offset);
5785*4882a593Smuzhiyun 			bfa_trc(phy, len);
5786*4882a593Smuzhiyun 
5787*4882a593Smuzhiyun 			for (i = 0; i < sz; i++)
5788*4882a593Smuzhiyun 				buf[i] = be16_to_cpu(dbuf[i]);
5789*4882a593Smuzhiyun 
5790*4882a593Smuzhiyun 			phy->residue -= len;
5791*4882a593Smuzhiyun 			phy->offset += len;
5792*4882a593Smuzhiyun 
5793*4882a593Smuzhiyun 			if (phy->residue == 0) {
5794*4882a593Smuzhiyun 				phy->status = status;
5795*4882a593Smuzhiyun 				phy->op_busy = 0;
5796*4882a593Smuzhiyun 				if (phy->cbfn)
5797*4882a593Smuzhiyun 					phy->cbfn(phy->cbarg, phy->status);
5798*4882a593Smuzhiyun 			} else
5799*4882a593Smuzhiyun 				bfa_phy_read_send(phy);
5800*4882a593Smuzhiyun 		}
5801*4882a593Smuzhiyun 		break;
5802*4882a593Smuzhiyun 	default:
5803*4882a593Smuzhiyun 		WARN_ON(1);
5804*4882a593Smuzhiyun 	}
5805*4882a593Smuzhiyun }
5806*4882a593Smuzhiyun 
5807*4882a593Smuzhiyun /*
5808*4882a593Smuzhiyun  * DCONF state machine events
5809*4882a593Smuzhiyun  */
5810*4882a593Smuzhiyun enum bfa_dconf_event {
5811*4882a593Smuzhiyun 	BFA_DCONF_SM_INIT		= 1,	/* dconf Init */
5812*4882a593Smuzhiyun 	BFA_DCONF_SM_FLASH_COMP		= 2,	/* read/write to flash */
5813*4882a593Smuzhiyun 	BFA_DCONF_SM_WR			= 3,	/* binding change, map */
5814*4882a593Smuzhiyun 	BFA_DCONF_SM_TIMEOUT		= 4,	/* Start timer */
5815*4882a593Smuzhiyun 	BFA_DCONF_SM_EXIT		= 5,	/* exit dconf module */
5816*4882a593Smuzhiyun 	BFA_DCONF_SM_IOCDISABLE		= 6,	/* IOC disable event */
5817*4882a593Smuzhiyun };
5818*4882a593Smuzhiyun 
5819*4882a593Smuzhiyun /* forward declaration of DCONF state machine */
5820*4882a593Smuzhiyun static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5821*4882a593Smuzhiyun 				enum bfa_dconf_event event);
5822*4882a593Smuzhiyun static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5823*4882a593Smuzhiyun 				enum bfa_dconf_event event);
5824*4882a593Smuzhiyun static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5825*4882a593Smuzhiyun 				enum bfa_dconf_event event);
5826*4882a593Smuzhiyun static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5827*4882a593Smuzhiyun 				enum bfa_dconf_event event);
5828*4882a593Smuzhiyun static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5829*4882a593Smuzhiyun 				enum bfa_dconf_event event);
5830*4882a593Smuzhiyun static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5831*4882a593Smuzhiyun 				enum bfa_dconf_event event);
5832*4882a593Smuzhiyun static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5833*4882a593Smuzhiyun 				enum bfa_dconf_event event);
5834*4882a593Smuzhiyun 
5835*4882a593Smuzhiyun static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5836*4882a593Smuzhiyun static void bfa_dconf_timer(void *cbarg);
5837*4882a593Smuzhiyun static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5838*4882a593Smuzhiyun static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5839*4882a593Smuzhiyun 
5840*4882a593Smuzhiyun /*
5841*4882a593Smuzhiyun  * Beginning state of dconf module. Waiting for an event to start.
5842*4882a593Smuzhiyun  */
5843*4882a593Smuzhiyun static void
bfa_dconf_sm_uninit(struct bfa_dconf_mod_s * dconf,enum bfa_dconf_event event)5844*4882a593Smuzhiyun bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5845*4882a593Smuzhiyun {
5846*4882a593Smuzhiyun 	bfa_status_t bfa_status;
5847*4882a593Smuzhiyun 	bfa_trc(dconf->bfa, event);
5848*4882a593Smuzhiyun 
5849*4882a593Smuzhiyun 	switch (event) {
5850*4882a593Smuzhiyun 	case BFA_DCONF_SM_INIT:
5851*4882a593Smuzhiyun 		if (dconf->min_cfg) {
5852*4882a593Smuzhiyun 			bfa_trc(dconf->bfa, dconf->min_cfg);
5853*4882a593Smuzhiyun 			bfa_fsm_send_event(&dconf->bfa->iocfc,
5854*4882a593Smuzhiyun 					IOCFC_E_DCONF_DONE);
5855*4882a593Smuzhiyun 			return;
5856*4882a593Smuzhiyun 		}
5857*4882a593Smuzhiyun 		bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5858*4882a593Smuzhiyun 		bfa_timer_start(dconf->bfa, &dconf->timer,
5859*4882a593Smuzhiyun 			bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
5860*4882a593Smuzhiyun 		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5861*4882a593Smuzhiyun 					BFA_FLASH_PART_DRV, dconf->instance,
5862*4882a593Smuzhiyun 					dconf->dconf,
5863*4882a593Smuzhiyun 					sizeof(struct bfa_dconf_s), 0,
5864*4882a593Smuzhiyun 					bfa_dconf_init_cb, dconf->bfa);
5865*4882a593Smuzhiyun 		if (bfa_status != BFA_STATUS_OK) {
5866*4882a593Smuzhiyun 			bfa_timer_stop(&dconf->timer);
5867*4882a593Smuzhiyun 			bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5868*4882a593Smuzhiyun 			bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5869*4882a593Smuzhiyun 			return;
5870*4882a593Smuzhiyun 		}
5871*4882a593Smuzhiyun 		break;
5872*4882a593Smuzhiyun 	case BFA_DCONF_SM_EXIT:
5873*4882a593Smuzhiyun 		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5874*4882a593Smuzhiyun 	case BFA_DCONF_SM_IOCDISABLE:
5875*4882a593Smuzhiyun 	case BFA_DCONF_SM_WR:
5876*4882a593Smuzhiyun 	case BFA_DCONF_SM_FLASH_COMP:
5877*4882a593Smuzhiyun 		break;
5878*4882a593Smuzhiyun 	default:
5879*4882a593Smuzhiyun 		bfa_sm_fault(dconf->bfa, event);
5880*4882a593Smuzhiyun 	}
5881*4882a593Smuzhiyun }
5882*4882a593Smuzhiyun 
5883*4882a593Smuzhiyun /*
5884*4882a593Smuzhiyun  * Read flash for dconf entries and make a call back to the driver once done.
5885*4882a593Smuzhiyun  */
5886*4882a593Smuzhiyun static void
bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s * dconf,enum bfa_dconf_event event)5887*4882a593Smuzhiyun bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5888*4882a593Smuzhiyun 			enum bfa_dconf_event event)
5889*4882a593Smuzhiyun {
5890*4882a593Smuzhiyun 	bfa_trc(dconf->bfa, event);
5891*4882a593Smuzhiyun 
5892*4882a593Smuzhiyun 	switch (event) {
5893*4882a593Smuzhiyun 	case BFA_DCONF_SM_FLASH_COMP:
5894*4882a593Smuzhiyun 		bfa_timer_stop(&dconf->timer);
5895*4882a593Smuzhiyun 		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5896*4882a593Smuzhiyun 		break;
5897*4882a593Smuzhiyun 	case BFA_DCONF_SM_TIMEOUT:
5898*4882a593Smuzhiyun 		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5899*4882a593Smuzhiyun 		bfa_ioc_suspend(&dconf->bfa->ioc);
5900*4882a593Smuzhiyun 		break;
5901*4882a593Smuzhiyun 	case BFA_DCONF_SM_EXIT:
5902*4882a593Smuzhiyun 		bfa_timer_stop(&dconf->timer);
5903*4882a593Smuzhiyun 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5904*4882a593Smuzhiyun 		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5905*4882a593Smuzhiyun 		break;
5906*4882a593Smuzhiyun 	case BFA_DCONF_SM_IOCDISABLE:
5907*4882a593Smuzhiyun 		bfa_timer_stop(&dconf->timer);
5908*4882a593Smuzhiyun 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5909*4882a593Smuzhiyun 		break;
5910*4882a593Smuzhiyun 	default:
5911*4882a593Smuzhiyun 		bfa_sm_fault(dconf->bfa, event);
5912*4882a593Smuzhiyun 	}
5913*4882a593Smuzhiyun }
5914*4882a593Smuzhiyun 
5915*4882a593Smuzhiyun /*
5916*4882a593Smuzhiyun  * DCONF Module is in ready state. Has completed the initialization.
5917*4882a593Smuzhiyun  */
5918*4882a593Smuzhiyun static void
bfa_dconf_sm_ready(struct bfa_dconf_mod_s * dconf,enum bfa_dconf_event event)5919*4882a593Smuzhiyun bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5920*4882a593Smuzhiyun {
5921*4882a593Smuzhiyun 	bfa_trc(dconf->bfa, event);
5922*4882a593Smuzhiyun 
5923*4882a593Smuzhiyun 	switch (event) {
5924*4882a593Smuzhiyun 	case BFA_DCONF_SM_WR:
5925*4882a593Smuzhiyun 		bfa_timer_start(dconf->bfa, &dconf->timer,
5926*4882a593Smuzhiyun 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5927*4882a593Smuzhiyun 		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5928*4882a593Smuzhiyun 		break;
5929*4882a593Smuzhiyun 	case BFA_DCONF_SM_EXIT:
5930*4882a593Smuzhiyun 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5931*4882a593Smuzhiyun 		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5932*4882a593Smuzhiyun 		break;
5933*4882a593Smuzhiyun 	case BFA_DCONF_SM_INIT:
5934*4882a593Smuzhiyun 	case BFA_DCONF_SM_IOCDISABLE:
5935*4882a593Smuzhiyun 		break;
5936*4882a593Smuzhiyun 	default:
5937*4882a593Smuzhiyun 		bfa_sm_fault(dconf->bfa, event);
5938*4882a593Smuzhiyun 	}
5939*4882a593Smuzhiyun }
5940*4882a593Smuzhiyun 
5941*4882a593Smuzhiyun /*
5942*4882a593Smuzhiyun  * entries are dirty, write back to the flash.
5943*4882a593Smuzhiyun  */
5944*4882a593Smuzhiyun 
5945*4882a593Smuzhiyun static void
bfa_dconf_sm_dirty(struct bfa_dconf_mod_s * dconf,enum bfa_dconf_event event)5946*4882a593Smuzhiyun bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5947*4882a593Smuzhiyun {
5948*4882a593Smuzhiyun 	bfa_trc(dconf->bfa, event);
5949*4882a593Smuzhiyun 
5950*4882a593Smuzhiyun 	switch (event) {
5951*4882a593Smuzhiyun 	case BFA_DCONF_SM_TIMEOUT:
5952*4882a593Smuzhiyun 		bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5953*4882a593Smuzhiyun 		bfa_dconf_flash_write(dconf);
5954*4882a593Smuzhiyun 		break;
5955*4882a593Smuzhiyun 	case BFA_DCONF_SM_WR:
5956*4882a593Smuzhiyun 		bfa_timer_stop(&dconf->timer);
5957*4882a593Smuzhiyun 		bfa_timer_start(dconf->bfa, &dconf->timer,
5958*4882a593Smuzhiyun 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5959*4882a593Smuzhiyun 		break;
5960*4882a593Smuzhiyun 	case BFA_DCONF_SM_EXIT:
5961*4882a593Smuzhiyun 		bfa_timer_stop(&dconf->timer);
5962*4882a593Smuzhiyun 		bfa_timer_start(dconf->bfa, &dconf->timer,
5963*4882a593Smuzhiyun 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5964*4882a593Smuzhiyun 		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5965*4882a593Smuzhiyun 		bfa_dconf_flash_write(dconf);
5966*4882a593Smuzhiyun 		break;
5967*4882a593Smuzhiyun 	case BFA_DCONF_SM_FLASH_COMP:
5968*4882a593Smuzhiyun 		break;
5969*4882a593Smuzhiyun 	case BFA_DCONF_SM_IOCDISABLE:
5970*4882a593Smuzhiyun 		bfa_timer_stop(&dconf->timer);
5971*4882a593Smuzhiyun 		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5972*4882a593Smuzhiyun 		break;
5973*4882a593Smuzhiyun 	default:
5974*4882a593Smuzhiyun 		bfa_sm_fault(dconf->bfa, event);
5975*4882a593Smuzhiyun 	}
5976*4882a593Smuzhiyun }
5977*4882a593Smuzhiyun 
5978*4882a593Smuzhiyun /*
5979*4882a593Smuzhiyun  * Sync the dconf entries to the flash.
5980*4882a593Smuzhiyun  */
5981*4882a593Smuzhiyun static void
bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s * dconf,enum bfa_dconf_event event)5982*4882a593Smuzhiyun bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5983*4882a593Smuzhiyun 			enum bfa_dconf_event event)
5984*4882a593Smuzhiyun {
5985*4882a593Smuzhiyun 	bfa_trc(dconf->bfa, event);
5986*4882a593Smuzhiyun 
5987*4882a593Smuzhiyun 	switch (event) {
5988*4882a593Smuzhiyun 	case BFA_DCONF_SM_IOCDISABLE:
5989*4882a593Smuzhiyun 	case BFA_DCONF_SM_FLASH_COMP:
5990*4882a593Smuzhiyun 		bfa_timer_stop(&dconf->timer);
5991*4882a593Smuzhiyun 		fallthrough;
5992*4882a593Smuzhiyun 	case BFA_DCONF_SM_TIMEOUT:
5993*4882a593Smuzhiyun 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5994*4882a593Smuzhiyun 		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5995*4882a593Smuzhiyun 		break;
5996*4882a593Smuzhiyun 	default:
5997*4882a593Smuzhiyun 		bfa_sm_fault(dconf->bfa, event);
5998*4882a593Smuzhiyun 	}
5999*4882a593Smuzhiyun }
6000*4882a593Smuzhiyun 
6001*4882a593Smuzhiyun static void
bfa_dconf_sm_sync(struct bfa_dconf_mod_s * dconf,enum bfa_dconf_event event)6002*4882a593Smuzhiyun bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
6003*4882a593Smuzhiyun {
6004*4882a593Smuzhiyun 	bfa_trc(dconf->bfa, event);
6005*4882a593Smuzhiyun 
6006*4882a593Smuzhiyun 	switch (event) {
6007*4882a593Smuzhiyun 	case BFA_DCONF_SM_FLASH_COMP:
6008*4882a593Smuzhiyun 		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
6009*4882a593Smuzhiyun 		break;
6010*4882a593Smuzhiyun 	case BFA_DCONF_SM_WR:
6011*4882a593Smuzhiyun 		bfa_timer_start(dconf->bfa, &dconf->timer,
6012*4882a593Smuzhiyun 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6013*4882a593Smuzhiyun 		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
6014*4882a593Smuzhiyun 		break;
6015*4882a593Smuzhiyun 	case BFA_DCONF_SM_EXIT:
6016*4882a593Smuzhiyun 		bfa_timer_start(dconf->bfa, &dconf->timer,
6017*4882a593Smuzhiyun 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6018*4882a593Smuzhiyun 		bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
6019*4882a593Smuzhiyun 		break;
6020*4882a593Smuzhiyun 	case BFA_DCONF_SM_IOCDISABLE:
6021*4882a593Smuzhiyun 		bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
6022*4882a593Smuzhiyun 		break;
6023*4882a593Smuzhiyun 	default:
6024*4882a593Smuzhiyun 		bfa_sm_fault(dconf->bfa, event);
6025*4882a593Smuzhiyun 	}
6026*4882a593Smuzhiyun }
6027*4882a593Smuzhiyun 
6028*4882a593Smuzhiyun static void
bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s * dconf,enum bfa_dconf_event event)6029*4882a593Smuzhiyun bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
6030*4882a593Smuzhiyun 			enum bfa_dconf_event event)
6031*4882a593Smuzhiyun {
6032*4882a593Smuzhiyun 	bfa_trc(dconf->bfa, event);
6033*4882a593Smuzhiyun 
6034*4882a593Smuzhiyun 	switch (event) {
6035*4882a593Smuzhiyun 	case BFA_DCONF_SM_INIT:
6036*4882a593Smuzhiyun 		bfa_timer_start(dconf->bfa, &dconf->timer,
6037*4882a593Smuzhiyun 			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
6038*4882a593Smuzhiyun 		bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
6039*4882a593Smuzhiyun 		break;
6040*4882a593Smuzhiyun 	case BFA_DCONF_SM_EXIT:
6041*4882a593Smuzhiyun 		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6042*4882a593Smuzhiyun 		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
6043*4882a593Smuzhiyun 		break;
6044*4882a593Smuzhiyun 	case BFA_DCONF_SM_IOCDISABLE:
6045*4882a593Smuzhiyun 		break;
6046*4882a593Smuzhiyun 	default:
6047*4882a593Smuzhiyun 		bfa_sm_fault(dconf->bfa, event);
6048*4882a593Smuzhiyun 	}
6049*4882a593Smuzhiyun }
6050*4882a593Smuzhiyun 
6051*4882a593Smuzhiyun /*
6052*4882a593Smuzhiyun  * Compute and return memory needed by DRV_CFG module.
6053*4882a593Smuzhiyun  */
6054*4882a593Smuzhiyun void
bfa_dconf_meminfo(struct bfa_iocfc_cfg_s * cfg,struct bfa_meminfo_s * meminfo,struct bfa_s * bfa)6055*4882a593Smuzhiyun bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
6056*4882a593Smuzhiyun 		  struct bfa_s *bfa)
6057*4882a593Smuzhiyun {
6058*4882a593Smuzhiyun 	struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
6059*4882a593Smuzhiyun 
6060*4882a593Smuzhiyun 	if (cfg->drvcfg.min_cfg)
6061*4882a593Smuzhiyun 		bfa_mem_kva_setup(meminfo, dconf_kva,
6062*4882a593Smuzhiyun 				sizeof(struct bfa_dconf_hdr_s));
6063*4882a593Smuzhiyun 	else
6064*4882a593Smuzhiyun 		bfa_mem_kva_setup(meminfo, dconf_kva,
6065*4882a593Smuzhiyun 				sizeof(struct bfa_dconf_s));
6066*4882a593Smuzhiyun }
6067*4882a593Smuzhiyun 
6068*4882a593Smuzhiyun void
bfa_dconf_attach(struct bfa_s * bfa,void * bfad,struct bfa_iocfc_cfg_s * cfg)6069*4882a593Smuzhiyun bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg)
6070*4882a593Smuzhiyun {
6071*4882a593Smuzhiyun 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6072*4882a593Smuzhiyun 
6073*4882a593Smuzhiyun 	dconf->bfad = bfad;
6074*4882a593Smuzhiyun 	dconf->bfa = bfa;
6075*4882a593Smuzhiyun 	dconf->instance = bfa->ioc.port_id;
6076*4882a593Smuzhiyun 	bfa_trc(bfa, dconf->instance);
6077*4882a593Smuzhiyun 
6078*4882a593Smuzhiyun 	dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
6079*4882a593Smuzhiyun 	if (cfg->drvcfg.min_cfg) {
6080*4882a593Smuzhiyun 		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
6081*4882a593Smuzhiyun 		dconf->min_cfg = BFA_TRUE;
6082*4882a593Smuzhiyun 	} else {
6083*4882a593Smuzhiyun 		dconf->min_cfg = BFA_FALSE;
6084*4882a593Smuzhiyun 		bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
6085*4882a593Smuzhiyun 	}
6086*4882a593Smuzhiyun 
6087*4882a593Smuzhiyun 	bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
6088*4882a593Smuzhiyun 	bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
6089*4882a593Smuzhiyun }
6090*4882a593Smuzhiyun 
6091*4882a593Smuzhiyun static void
bfa_dconf_init_cb(void * arg,bfa_status_t status)6092*4882a593Smuzhiyun bfa_dconf_init_cb(void *arg, bfa_status_t status)
6093*4882a593Smuzhiyun {
6094*4882a593Smuzhiyun 	struct bfa_s *bfa = arg;
6095*4882a593Smuzhiyun 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6096*4882a593Smuzhiyun 
6097*4882a593Smuzhiyun 	if (status == BFA_STATUS_OK) {
6098*4882a593Smuzhiyun 		bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
6099*4882a593Smuzhiyun 		if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
6100*4882a593Smuzhiyun 			dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
6101*4882a593Smuzhiyun 		if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
6102*4882a593Smuzhiyun 			dconf->dconf->hdr.version = BFI_DCONF_VERSION;
6103*4882a593Smuzhiyun 	}
6104*4882a593Smuzhiyun 	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6105*4882a593Smuzhiyun 	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
6106*4882a593Smuzhiyun }
6107*4882a593Smuzhiyun 
6108*4882a593Smuzhiyun void
bfa_dconf_modinit(struct bfa_s * bfa)6109*4882a593Smuzhiyun bfa_dconf_modinit(struct bfa_s *bfa)
6110*4882a593Smuzhiyun {
6111*4882a593Smuzhiyun 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6112*4882a593Smuzhiyun 	bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
6113*4882a593Smuzhiyun }
6114*4882a593Smuzhiyun 
bfa_dconf_timer(void * cbarg)6115*4882a593Smuzhiyun static void bfa_dconf_timer(void *cbarg)
6116*4882a593Smuzhiyun {
6117*4882a593Smuzhiyun 	struct bfa_dconf_mod_s *dconf = cbarg;
6118*4882a593Smuzhiyun 	bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
6119*4882a593Smuzhiyun }
6120*4882a593Smuzhiyun 
6121*4882a593Smuzhiyun void
bfa_dconf_iocdisable(struct bfa_s * bfa)6122*4882a593Smuzhiyun bfa_dconf_iocdisable(struct bfa_s *bfa)
6123*4882a593Smuzhiyun {
6124*4882a593Smuzhiyun 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6125*4882a593Smuzhiyun 	bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
6126*4882a593Smuzhiyun }
6127*4882a593Smuzhiyun 
6128*4882a593Smuzhiyun static bfa_status_t
bfa_dconf_flash_write(struct bfa_dconf_mod_s * dconf)6129*4882a593Smuzhiyun bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
6130*4882a593Smuzhiyun {
6131*4882a593Smuzhiyun 	bfa_status_t bfa_status;
6132*4882a593Smuzhiyun 	bfa_trc(dconf->bfa, 0);
6133*4882a593Smuzhiyun 
6134*4882a593Smuzhiyun 	bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
6135*4882a593Smuzhiyun 				BFA_FLASH_PART_DRV, dconf->instance,
6136*4882a593Smuzhiyun 				dconf->dconf,  sizeof(struct bfa_dconf_s), 0,
6137*4882a593Smuzhiyun 				bfa_dconf_cbfn, dconf);
6138*4882a593Smuzhiyun 	if (bfa_status != BFA_STATUS_OK)
6139*4882a593Smuzhiyun 		WARN_ON(bfa_status);
6140*4882a593Smuzhiyun 	bfa_trc(dconf->bfa, bfa_status);
6141*4882a593Smuzhiyun 
6142*4882a593Smuzhiyun 	return bfa_status;
6143*4882a593Smuzhiyun }
6144*4882a593Smuzhiyun 
6145*4882a593Smuzhiyun bfa_status_t
bfa_dconf_update(struct bfa_s * bfa)6146*4882a593Smuzhiyun bfa_dconf_update(struct bfa_s *bfa)
6147*4882a593Smuzhiyun {
6148*4882a593Smuzhiyun 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6149*4882a593Smuzhiyun 	bfa_trc(dconf->bfa, 0);
6150*4882a593Smuzhiyun 	if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
6151*4882a593Smuzhiyun 		return BFA_STATUS_FAILED;
6152*4882a593Smuzhiyun 
6153*4882a593Smuzhiyun 	if (dconf->min_cfg) {
6154*4882a593Smuzhiyun 		bfa_trc(dconf->bfa, dconf->min_cfg);
6155*4882a593Smuzhiyun 		return BFA_STATUS_FAILED;
6156*4882a593Smuzhiyun 	}
6157*4882a593Smuzhiyun 
6158*4882a593Smuzhiyun 	bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
6159*4882a593Smuzhiyun 	return BFA_STATUS_OK;
6160*4882a593Smuzhiyun }
6161*4882a593Smuzhiyun 
6162*4882a593Smuzhiyun static void
bfa_dconf_cbfn(void * arg,bfa_status_t status)6163*4882a593Smuzhiyun bfa_dconf_cbfn(void *arg, bfa_status_t status)
6164*4882a593Smuzhiyun {
6165*4882a593Smuzhiyun 	struct bfa_dconf_mod_s *dconf = arg;
6166*4882a593Smuzhiyun 	WARN_ON(status);
6167*4882a593Smuzhiyun 	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6168*4882a593Smuzhiyun }
6169*4882a593Smuzhiyun 
6170*4882a593Smuzhiyun void
bfa_dconf_modexit(struct bfa_s * bfa)6171*4882a593Smuzhiyun bfa_dconf_modexit(struct bfa_s *bfa)
6172*4882a593Smuzhiyun {
6173*4882a593Smuzhiyun 	struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6174*4882a593Smuzhiyun 	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
6175*4882a593Smuzhiyun }
6176*4882a593Smuzhiyun 
6177*4882a593Smuzhiyun /*
6178*4882a593Smuzhiyun  * FRU specific functions
6179*4882a593Smuzhiyun  */
6180*4882a593Smuzhiyun 
6181*4882a593Smuzhiyun #define BFA_FRU_DMA_BUF_SZ	0x02000		/* 8k dma buffer */
6182*4882a593Smuzhiyun #define BFA_FRU_CHINOOK_MAX_SIZE 0x10000
6183*4882a593Smuzhiyun #define BFA_FRU_LIGHTNING_MAX_SIZE 0x200
6184*4882a593Smuzhiyun 
6185*4882a593Smuzhiyun static void
bfa_fru_notify(void * cbarg,enum bfa_ioc_event_e event)6186*4882a593Smuzhiyun bfa_fru_notify(void *cbarg, enum bfa_ioc_event_e event)
6187*4882a593Smuzhiyun {
6188*4882a593Smuzhiyun 	struct bfa_fru_s *fru = cbarg;
6189*4882a593Smuzhiyun 
6190*4882a593Smuzhiyun 	bfa_trc(fru, event);
6191*4882a593Smuzhiyun 
6192*4882a593Smuzhiyun 	switch (event) {
6193*4882a593Smuzhiyun 	case BFA_IOC_E_DISABLED:
6194*4882a593Smuzhiyun 	case BFA_IOC_E_FAILED:
6195*4882a593Smuzhiyun 		if (fru->op_busy) {
6196*4882a593Smuzhiyun 			fru->status = BFA_STATUS_IOC_FAILURE;
6197*4882a593Smuzhiyun 			fru->cbfn(fru->cbarg, fru->status);
6198*4882a593Smuzhiyun 			fru->op_busy = 0;
6199*4882a593Smuzhiyun 		}
6200*4882a593Smuzhiyun 		break;
6201*4882a593Smuzhiyun 
6202*4882a593Smuzhiyun 	default:
6203*4882a593Smuzhiyun 		break;
6204*4882a593Smuzhiyun 	}
6205*4882a593Smuzhiyun }
6206*4882a593Smuzhiyun 
6207*4882a593Smuzhiyun /*
6208*4882a593Smuzhiyun  * Send fru write request.
6209*4882a593Smuzhiyun  *
6210*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
6211*4882a593Smuzhiyun  */
6212*4882a593Smuzhiyun static void
bfa_fru_write_send(void * cbarg,enum bfi_fru_h2i_msgs msg_type)6213*4882a593Smuzhiyun bfa_fru_write_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6214*4882a593Smuzhiyun {
6215*4882a593Smuzhiyun 	struct bfa_fru_s *fru = cbarg;
6216*4882a593Smuzhiyun 	struct bfi_fru_write_req_s *msg =
6217*4882a593Smuzhiyun 			(struct bfi_fru_write_req_s *) fru->mb.msg;
6218*4882a593Smuzhiyun 	u32 len;
6219*4882a593Smuzhiyun 
6220*4882a593Smuzhiyun 	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6221*4882a593Smuzhiyun 	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6222*4882a593Smuzhiyun 				fru->residue : BFA_FRU_DMA_BUF_SZ;
6223*4882a593Smuzhiyun 	msg->length = cpu_to_be32(len);
6224*4882a593Smuzhiyun 
6225*4882a593Smuzhiyun 	/*
6226*4882a593Smuzhiyun 	 * indicate if it's the last msg of the whole write operation
6227*4882a593Smuzhiyun 	 */
6228*4882a593Smuzhiyun 	msg->last = (len == fru->residue) ? 1 : 0;
6229*4882a593Smuzhiyun 
6230*4882a593Smuzhiyun 	msg->trfr_cmpl = (len == fru->residue) ? fru->trfr_cmpl : 0;
6231*4882a593Smuzhiyun 	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6232*4882a593Smuzhiyun 	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6233*4882a593Smuzhiyun 
6234*4882a593Smuzhiyun 	memcpy(fru->dbuf_kva, fru->ubuf + fru->offset, len);
6235*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6236*4882a593Smuzhiyun 
6237*4882a593Smuzhiyun 	fru->residue -= len;
6238*4882a593Smuzhiyun 	fru->offset += len;
6239*4882a593Smuzhiyun }
6240*4882a593Smuzhiyun 
6241*4882a593Smuzhiyun /*
6242*4882a593Smuzhiyun  * Send fru read request.
6243*4882a593Smuzhiyun  *
6244*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
6245*4882a593Smuzhiyun  */
6246*4882a593Smuzhiyun static void
bfa_fru_read_send(void * cbarg,enum bfi_fru_h2i_msgs msg_type)6247*4882a593Smuzhiyun bfa_fru_read_send(void *cbarg, enum bfi_fru_h2i_msgs msg_type)
6248*4882a593Smuzhiyun {
6249*4882a593Smuzhiyun 	struct bfa_fru_s *fru = cbarg;
6250*4882a593Smuzhiyun 	struct bfi_fru_read_req_s *msg =
6251*4882a593Smuzhiyun 			(struct bfi_fru_read_req_s *) fru->mb.msg;
6252*4882a593Smuzhiyun 	u32 len;
6253*4882a593Smuzhiyun 
6254*4882a593Smuzhiyun 	msg->offset = cpu_to_be32(fru->addr_off + fru->offset);
6255*4882a593Smuzhiyun 	len = (fru->residue < BFA_FRU_DMA_BUF_SZ) ?
6256*4882a593Smuzhiyun 				fru->residue : BFA_FRU_DMA_BUF_SZ;
6257*4882a593Smuzhiyun 	msg->length = cpu_to_be32(len);
6258*4882a593Smuzhiyun 	bfi_h2i_set(msg->mh, BFI_MC_FRU, msg_type, bfa_ioc_portid(fru->ioc));
6259*4882a593Smuzhiyun 	bfa_alen_set(&msg->alen, len, fru->dbuf_pa);
6260*4882a593Smuzhiyun 	bfa_ioc_mbox_queue(fru->ioc, &fru->mb);
6261*4882a593Smuzhiyun }
6262*4882a593Smuzhiyun 
6263*4882a593Smuzhiyun /*
6264*4882a593Smuzhiyun  * Flash memory info API.
6265*4882a593Smuzhiyun  *
6266*4882a593Smuzhiyun  * @param[in] mincfg - minimal cfg variable
6267*4882a593Smuzhiyun  */
6268*4882a593Smuzhiyun u32
bfa_fru_meminfo(bfa_boolean_t mincfg)6269*4882a593Smuzhiyun bfa_fru_meminfo(bfa_boolean_t mincfg)
6270*4882a593Smuzhiyun {
6271*4882a593Smuzhiyun 	/* min driver doesn't need fru */
6272*4882a593Smuzhiyun 	if (mincfg)
6273*4882a593Smuzhiyun 		return 0;
6274*4882a593Smuzhiyun 
6275*4882a593Smuzhiyun 	return BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6276*4882a593Smuzhiyun }
6277*4882a593Smuzhiyun 
6278*4882a593Smuzhiyun /*
6279*4882a593Smuzhiyun  * Flash attach API.
6280*4882a593Smuzhiyun  *
6281*4882a593Smuzhiyun  * @param[in] fru - fru structure
6282*4882a593Smuzhiyun  * @param[in] ioc  - ioc structure
6283*4882a593Smuzhiyun  * @param[in] dev  - device structure
6284*4882a593Smuzhiyun  * @param[in] trcmod - trace module
6285*4882a593Smuzhiyun  * @param[in] logmod - log module
6286*4882a593Smuzhiyun  */
6287*4882a593Smuzhiyun void
bfa_fru_attach(struct bfa_fru_s * fru,struct bfa_ioc_s * ioc,void * dev,struct bfa_trc_mod_s * trcmod,bfa_boolean_t mincfg)6288*4882a593Smuzhiyun bfa_fru_attach(struct bfa_fru_s *fru, struct bfa_ioc_s *ioc, void *dev,
6289*4882a593Smuzhiyun 	struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
6290*4882a593Smuzhiyun {
6291*4882a593Smuzhiyun 	fru->ioc = ioc;
6292*4882a593Smuzhiyun 	fru->trcmod = trcmod;
6293*4882a593Smuzhiyun 	fru->cbfn = NULL;
6294*4882a593Smuzhiyun 	fru->cbarg = NULL;
6295*4882a593Smuzhiyun 	fru->op_busy = 0;
6296*4882a593Smuzhiyun 
6297*4882a593Smuzhiyun 	bfa_ioc_mbox_regisr(fru->ioc, BFI_MC_FRU, bfa_fru_intr, fru);
6298*4882a593Smuzhiyun 	bfa_q_qe_init(&fru->ioc_notify);
6299*4882a593Smuzhiyun 	bfa_ioc_notify_init(&fru->ioc_notify, bfa_fru_notify, fru);
6300*4882a593Smuzhiyun 	list_add_tail(&fru->ioc_notify.qe, &fru->ioc->notify_q);
6301*4882a593Smuzhiyun 
6302*4882a593Smuzhiyun 	/* min driver doesn't need fru */
6303*4882a593Smuzhiyun 	if (mincfg) {
6304*4882a593Smuzhiyun 		fru->dbuf_kva = NULL;
6305*4882a593Smuzhiyun 		fru->dbuf_pa = 0;
6306*4882a593Smuzhiyun 	}
6307*4882a593Smuzhiyun }
6308*4882a593Smuzhiyun 
6309*4882a593Smuzhiyun /*
6310*4882a593Smuzhiyun  * Claim memory for fru
6311*4882a593Smuzhiyun  *
6312*4882a593Smuzhiyun  * @param[in] fru - fru structure
6313*4882a593Smuzhiyun  * @param[in] dm_kva - pointer to virtual memory address
6314*4882a593Smuzhiyun  * @param[in] dm_pa - frusical memory address
6315*4882a593Smuzhiyun  * @param[in] mincfg - minimal cfg variable
6316*4882a593Smuzhiyun  */
6317*4882a593Smuzhiyun void
bfa_fru_memclaim(struct bfa_fru_s * fru,u8 * dm_kva,u64 dm_pa,bfa_boolean_t mincfg)6318*4882a593Smuzhiyun bfa_fru_memclaim(struct bfa_fru_s *fru, u8 *dm_kva, u64 dm_pa,
6319*4882a593Smuzhiyun 	bfa_boolean_t mincfg)
6320*4882a593Smuzhiyun {
6321*4882a593Smuzhiyun 	if (mincfg)
6322*4882a593Smuzhiyun 		return;
6323*4882a593Smuzhiyun 
6324*4882a593Smuzhiyun 	fru->dbuf_kva = dm_kva;
6325*4882a593Smuzhiyun 	fru->dbuf_pa = dm_pa;
6326*4882a593Smuzhiyun 	memset(fru->dbuf_kva, 0, BFA_FRU_DMA_BUF_SZ);
6327*4882a593Smuzhiyun 	dm_kva += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6328*4882a593Smuzhiyun 	dm_pa += BFA_ROUNDUP(BFA_FRU_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
6329*4882a593Smuzhiyun }
6330*4882a593Smuzhiyun 
6331*4882a593Smuzhiyun /*
6332*4882a593Smuzhiyun  * Update fru vpd image.
6333*4882a593Smuzhiyun  *
6334*4882a593Smuzhiyun  * @param[in] fru - fru structure
6335*4882a593Smuzhiyun  * @param[in] buf - update data buffer
6336*4882a593Smuzhiyun  * @param[in] len - data buffer length
6337*4882a593Smuzhiyun  * @param[in] offset - offset relative to starting address
6338*4882a593Smuzhiyun  * @param[in] cbfn - callback function
6339*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
6340*4882a593Smuzhiyun  *
6341*4882a593Smuzhiyun  * Return status.
6342*4882a593Smuzhiyun  */
6343*4882a593Smuzhiyun bfa_status_t
bfa_fruvpd_update(struct bfa_fru_s * fru,void * buf,u32 len,u32 offset,bfa_cb_fru_t cbfn,void * cbarg,u8 trfr_cmpl)6344*4882a593Smuzhiyun bfa_fruvpd_update(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6345*4882a593Smuzhiyun 		  bfa_cb_fru_t cbfn, void *cbarg, u8 trfr_cmpl)
6346*4882a593Smuzhiyun {
6347*4882a593Smuzhiyun 	bfa_trc(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6348*4882a593Smuzhiyun 	bfa_trc(fru, len);
6349*4882a593Smuzhiyun 	bfa_trc(fru, offset);
6350*4882a593Smuzhiyun 
6351*4882a593Smuzhiyun 	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2 &&
6352*4882a593Smuzhiyun 		fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6353*4882a593Smuzhiyun 		return BFA_STATUS_FRU_NOT_PRESENT;
6354*4882a593Smuzhiyun 
6355*4882a593Smuzhiyun 	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK)
6356*4882a593Smuzhiyun 		return BFA_STATUS_CMD_NOTSUPP;
6357*4882a593Smuzhiyun 
6358*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(fru->ioc))
6359*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
6360*4882a593Smuzhiyun 
6361*4882a593Smuzhiyun 	if (fru->op_busy) {
6362*4882a593Smuzhiyun 		bfa_trc(fru, fru->op_busy);
6363*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
6364*4882a593Smuzhiyun 	}
6365*4882a593Smuzhiyun 
6366*4882a593Smuzhiyun 	fru->op_busy = 1;
6367*4882a593Smuzhiyun 
6368*4882a593Smuzhiyun 	fru->cbfn = cbfn;
6369*4882a593Smuzhiyun 	fru->cbarg = cbarg;
6370*4882a593Smuzhiyun 	fru->residue = len;
6371*4882a593Smuzhiyun 	fru->offset = 0;
6372*4882a593Smuzhiyun 	fru->addr_off = offset;
6373*4882a593Smuzhiyun 	fru->ubuf = buf;
6374*4882a593Smuzhiyun 	fru->trfr_cmpl = trfr_cmpl;
6375*4882a593Smuzhiyun 
6376*4882a593Smuzhiyun 	bfa_fru_write_send(fru, BFI_FRUVPD_H2I_WRITE_REQ);
6377*4882a593Smuzhiyun 
6378*4882a593Smuzhiyun 	return BFA_STATUS_OK;
6379*4882a593Smuzhiyun }
6380*4882a593Smuzhiyun 
6381*4882a593Smuzhiyun /*
6382*4882a593Smuzhiyun  * Read fru vpd image.
6383*4882a593Smuzhiyun  *
6384*4882a593Smuzhiyun  * @param[in] fru - fru structure
6385*4882a593Smuzhiyun  * @param[in] buf - read data buffer
6386*4882a593Smuzhiyun  * @param[in] len - data buffer length
6387*4882a593Smuzhiyun  * @param[in] offset - offset relative to starting address
6388*4882a593Smuzhiyun  * @param[in] cbfn - callback function
6389*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
6390*4882a593Smuzhiyun  *
6391*4882a593Smuzhiyun  * Return status.
6392*4882a593Smuzhiyun  */
6393*4882a593Smuzhiyun bfa_status_t
bfa_fruvpd_read(struct bfa_fru_s * fru,void * buf,u32 len,u32 offset,bfa_cb_fru_t cbfn,void * cbarg)6394*4882a593Smuzhiyun bfa_fruvpd_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6395*4882a593Smuzhiyun 		bfa_cb_fru_t cbfn, void *cbarg)
6396*4882a593Smuzhiyun {
6397*4882a593Smuzhiyun 	bfa_trc(fru, BFI_FRUVPD_H2I_READ_REQ);
6398*4882a593Smuzhiyun 	bfa_trc(fru, len);
6399*4882a593Smuzhiyun 	bfa_trc(fru, offset);
6400*4882a593Smuzhiyun 
6401*4882a593Smuzhiyun 	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6402*4882a593Smuzhiyun 		return BFA_STATUS_FRU_NOT_PRESENT;
6403*4882a593Smuzhiyun 
6404*4882a593Smuzhiyun 	if (fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK &&
6405*4882a593Smuzhiyun 		fru->ioc->attr->card_type != BFA_MFG_TYPE_CHINOOK2)
6406*4882a593Smuzhiyun 		return BFA_STATUS_CMD_NOTSUPP;
6407*4882a593Smuzhiyun 
6408*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(fru->ioc))
6409*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
6410*4882a593Smuzhiyun 
6411*4882a593Smuzhiyun 	if (fru->op_busy) {
6412*4882a593Smuzhiyun 		bfa_trc(fru, fru->op_busy);
6413*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
6414*4882a593Smuzhiyun 	}
6415*4882a593Smuzhiyun 
6416*4882a593Smuzhiyun 	fru->op_busy = 1;
6417*4882a593Smuzhiyun 
6418*4882a593Smuzhiyun 	fru->cbfn = cbfn;
6419*4882a593Smuzhiyun 	fru->cbarg = cbarg;
6420*4882a593Smuzhiyun 	fru->residue = len;
6421*4882a593Smuzhiyun 	fru->offset = 0;
6422*4882a593Smuzhiyun 	fru->addr_off = offset;
6423*4882a593Smuzhiyun 	fru->ubuf = buf;
6424*4882a593Smuzhiyun 	bfa_fru_read_send(fru, BFI_FRUVPD_H2I_READ_REQ);
6425*4882a593Smuzhiyun 
6426*4882a593Smuzhiyun 	return BFA_STATUS_OK;
6427*4882a593Smuzhiyun }
6428*4882a593Smuzhiyun 
6429*4882a593Smuzhiyun /*
6430*4882a593Smuzhiyun  * Get maximum size fru vpd image.
6431*4882a593Smuzhiyun  *
6432*4882a593Smuzhiyun  * @param[in] fru - fru structure
6433*4882a593Smuzhiyun  * @param[out] size - maximum size of fru vpd data
6434*4882a593Smuzhiyun  *
6435*4882a593Smuzhiyun  * Return status.
6436*4882a593Smuzhiyun  */
6437*4882a593Smuzhiyun bfa_status_t
bfa_fruvpd_get_max_size(struct bfa_fru_s * fru,u32 * max_size)6438*4882a593Smuzhiyun bfa_fruvpd_get_max_size(struct bfa_fru_s *fru, u32 *max_size)
6439*4882a593Smuzhiyun {
6440*4882a593Smuzhiyun 	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6441*4882a593Smuzhiyun 		return BFA_STATUS_FRU_NOT_PRESENT;
6442*4882a593Smuzhiyun 
6443*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(fru->ioc))
6444*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
6445*4882a593Smuzhiyun 
6446*4882a593Smuzhiyun 	if (fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK ||
6447*4882a593Smuzhiyun 		fru->ioc->attr->card_type == BFA_MFG_TYPE_CHINOOK2)
6448*4882a593Smuzhiyun 		*max_size = BFA_FRU_CHINOOK_MAX_SIZE;
6449*4882a593Smuzhiyun 	else
6450*4882a593Smuzhiyun 		return BFA_STATUS_CMD_NOTSUPP;
6451*4882a593Smuzhiyun 	return BFA_STATUS_OK;
6452*4882a593Smuzhiyun }
6453*4882a593Smuzhiyun /*
6454*4882a593Smuzhiyun  * tfru write.
6455*4882a593Smuzhiyun  *
6456*4882a593Smuzhiyun  * @param[in] fru - fru structure
6457*4882a593Smuzhiyun  * @param[in] buf - update data buffer
6458*4882a593Smuzhiyun  * @param[in] len - data buffer length
6459*4882a593Smuzhiyun  * @param[in] offset - offset relative to starting address
6460*4882a593Smuzhiyun  * @param[in] cbfn - callback function
6461*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
6462*4882a593Smuzhiyun  *
6463*4882a593Smuzhiyun  * Return status.
6464*4882a593Smuzhiyun  */
6465*4882a593Smuzhiyun bfa_status_t
bfa_tfru_write(struct bfa_fru_s * fru,void * buf,u32 len,u32 offset,bfa_cb_fru_t cbfn,void * cbarg)6466*4882a593Smuzhiyun bfa_tfru_write(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6467*4882a593Smuzhiyun 	       bfa_cb_fru_t cbfn, void *cbarg)
6468*4882a593Smuzhiyun {
6469*4882a593Smuzhiyun 	bfa_trc(fru, BFI_TFRU_H2I_WRITE_REQ);
6470*4882a593Smuzhiyun 	bfa_trc(fru, len);
6471*4882a593Smuzhiyun 	bfa_trc(fru, offset);
6472*4882a593Smuzhiyun 	bfa_trc(fru, *((u8 *) buf));
6473*4882a593Smuzhiyun 
6474*4882a593Smuzhiyun 	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6475*4882a593Smuzhiyun 		return BFA_STATUS_FRU_NOT_PRESENT;
6476*4882a593Smuzhiyun 
6477*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(fru->ioc))
6478*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
6479*4882a593Smuzhiyun 
6480*4882a593Smuzhiyun 	if (fru->op_busy) {
6481*4882a593Smuzhiyun 		bfa_trc(fru, fru->op_busy);
6482*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
6483*4882a593Smuzhiyun 	}
6484*4882a593Smuzhiyun 
6485*4882a593Smuzhiyun 	fru->op_busy = 1;
6486*4882a593Smuzhiyun 
6487*4882a593Smuzhiyun 	fru->cbfn = cbfn;
6488*4882a593Smuzhiyun 	fru->cbarg = cbarg;
6489*4882a593Smuzhiyun 	fru->residue = len;
6490*4882a593Smuzhiyun 	fru->offset = 0;
6491*4882a593Smuzhiyun 	fru->addr_off = offset;
6492*4882a593Smuzhiyun 	fru->ubuf = buf;
6493*4882a593Smuzhiyun 
6494*4882a593Smuzhiyun 	bfa_fru_write_send(fru, BFI_TFRU_H2I_WRITE_REQ);
6495*4882a593Smuzhiyun 
6496*4882a593Smuzhiyun 	return BFA_STATUS_OK;
6497*4882a593Smuzhiyun }
6498*4882a593Smuzhiyun 
6499*4882a593Smuzhiyun /*
6500*4882a593Smuzhiyun  * tfru read.
6501*4882a593Smuzhiyun  *
6502*4882a593Smuzhiyun  * @param[in] fru - fru structure
6503*4882a593Smuzhiyun  * @param[in] buf - read data buffer
6504*4882a593Smuzhiyun  * @param[in] len - data buffer length
6505*4882a593Smuzhiyun  * @param[in] offset - offset relative to starting address
6506*4882a593Smuzhiyun  * @param[in] cbfn - callback function
6507*4882a593Smuzhiyun  * @param[in] cbarg - callback argument
6508*4882a593Smuzhiyun  *
6509*4882a593Smuzhiyun  * Return status.
6510*4882a593Smuzhiyun  */
6511*4882a593Smuzhiyun bfa_status_t
bfa_tfru_read(struct bfa_fru_s * fru,void * buf,u32 len,u32 offset,bfa_cb_fru_t cbfn,void * cbarg)6512*4882a593Smuzhiyun bfa_tfru_read(struct bfa_fru_s *fru, void *buf, u32 len, u32 offset,
6513*4882a593Smuzhiyun 	      bfa_cb_fru_t cbfn, void *cbarg)
6514*4882a593Smuzhiyun {
6515*4882a593Smuzhiyun 	bfa_trc(fru, BFI_TFRU_H2I_READ_REQ);
6516*4882a593Smuzhiyun 	bfa_trc(fru, len);
6517*4882a593Smuzhiyun 	bfa_trc(fru, offset);
6518*4882a593Smuzhiyun 
6519*4882a593Smuzhiyun 	if (fru->ioc->asic_gen != BFI_ASIC_GEN_CT2)
6520*4882a593Smuzhiyun 		return BFA_STATUS_FRU_NOT_PRESENT;
6521*4882a593Smuzhiyun 
6522*4882a593Smuzhiyun 	if (!bfa_ioc_is_operational(fru->ioc))
6523*4882a593Smuzhiyun 		return BFA_STATUS_IOC_NON_OP;
6524*4882a593Smuzhiyun 
6525*4882a593Smuzhiyun 	if (fru->op_busy) {
6526*4882a593Smuzhiyun 		bfa_trc(fru, fru->op_busy);
6527*4882a593Smuzhiyun 		return BFA_STATUS_DEVBUSY;
6528*4882a593Smuzhiyun 	}
6529*4882a593Smuzhiyun 
6530*4882a593Smuzhiyun 	fru->op_busy = 1;
6531*4882a593Smuzhiyun 
6532*4882a593Smuzhiyun 	fru->cbfn = cbfn;
6533*4882a593Smuzhiyun 	fru->cbarg = cbarg;
6534*4882a593Smuzhiyun 	fru->residue = len;
6535*4882a593Smuzhiyun 	fru->offset = 0;
6536*4882a593Smuzhiyun 	fru->addr_off = offset;
6537*4882a593Smuzhiyun 	fru->ubuf = buf;
6538*4882a593Smuzhiyun 	bfa_fru_read_send(fru, BFI_TFRU_H2I_READ_REQ);
6539*4882a593Smuzhiyun 
6540*4882a593Smuzhiyun 	return BFA_STATUS_OK;
6541*4882a593Smuzhiyun }
6542*4882a593Smuzhiyun 
6543*4882a593Smuzhiyun /*
6544*4882a593Smuzhiyun  * Process fru response messages upon receiving interrupts.
6545*4882a593Smuzhiyun  *
6546*4882a593Smuzhiyun  * @param[in] fruarg - fru structure
6547*4882a593Smuzhiyun  * @param[in] msg - message structure
6548*4882a593Smuzhiyun  */
6549*4882a593Smuzhiyun void
bfa_fru_intr(void * fruarg,struct bfi_mbmsg_s * msg)6550*4882a593Smuzhiyun bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)
6551*4882a593Smuzhiyun {
6552*4882a593Smuzhiyun 	struct bfa_fru_s *fru = fruarg;
6553*4882a593Smuzhiyun 	struct bfi_fru_rsp_s *rsp = (struct bfi_fru_rsp_s *)msg;
6554*4882a593Smuzhiyun 	u32 status;
6555*4882a593Smuzhiyun 
6556*4882a593Smuzhiyun 	bfa_trc(fru, msg->mh.msg_id);
6557*4882a593Smuzhiyun 
6558*4882a593Smuzhiyun 	if (!fru->op_busy) {
6559*4882a593Smuzhiyun 		/*
6560*4882a593Smuzhiyun 		 * receiving response after ioc failure
6561*4882a593Smuzhiyun 		 */
6562*4882a593Smuzhiyun 		bfa_trc(fru, 0x9999);
6563*4882a593Smuzhiyun 		return;
6564*4882a593Smuzhiyun 	}
6565*4882a593Smuzhiyun 
6566*4882a593Smuzhiyun 	switch (msg->mh.msg_id) {
6567*4882a593Smuzhiyun 	case BFI_FRUVPD_I2H_WRITE_RSP:
6568*4882a593Smuzhiyun 	case BFI_TFRU_I2H_WRITE_RSP:
6569*4882a593Smuzhiyun 		status = be32_to_cpu(rsp->status);
6570*4882a593Smuzhiyun 		bfa_trc(fru, status);
6571*4882a593Smuzhiyun 
6572*4882a593Smuzhiyun 		if (status != BFA_STATUS_OK || fru->residue == 0) {
6573*4882a593Smuzhiyun 			fru->status = status;
6574*4882a593Smuzhiyun 			fru->op_busy = 0;
6575*4882a593Smuzhiyun 			if (fru->cbfn)
6576*4882a593Smuzhiyun 				fru->cbfn(fru->cbarg, fru->status);
6577*4882a593Smuzhiyun 		} else {
6578*4882a593Smuzhiyun 			bfa_trc(fru, fru->offset);
6579*4882a593Smuzhiyun 			if (msg->mh.msg_id == BFI_FRUVPD_I2H_WRITE_RSP)
6580*4882a593Smuzhiyun 				bfa_fru_write_send(fru,
6581*4882a593Smuzhiyun 					BFI_FRUVPD_H2I_WRITE_REQ);
6582*4882a593Smuzhiyun 			else
6583*4882a593Smuzhiyun 				bfa_fru_write_send(fru,
6584*4882a593Smuzhiyun 					BFI_TFRU_H2I_WRITE_REQ);
6585*4882a593Smuzhiyun 		}
6586*4882a593Smuzhiyun 		break;
6587*4882a593Smuzhiyun 	case BFI_FRUVPD_I2H_READ_RSP:
6588*4882a593Smuzhiyun 	case BFI_TFRU_I2H_READ_RSP:
6589*4882a593Smuzhiyun 		status = be32_to_cpu(rsp->status);
6590*4882a593Smuzhiyun 		bfa_trc(fru, status);
6591*4882a593Smuzhiyun 
6592*4882a593Smuzhiyun 		if (status != BFA_STATUS_OK) {
6593*4882a593Smuzhiyun 			fru->status = status;
6594*4882a593Smuzhiyun 			fru->op_busy = 0;
6595*4882a593Smuzhiyun 			if (fru->cbfn)
6596*4882a593Smuzhiyun 				fru->cbfn(fru->cbarg, fru->status);
6597*4882a593Smuzhiyun 		} else {
6598*4882a593Smuzhiyun 			u32 len = be32_to_cpu(rsp->length);
6599*4882a593Smuzhiyun 
6600*4882a593Smuzhiyun 			bfa_trc(fru, fru->offset);
6601*4882a593Smuzhiyun 			bfa_trc(fru, len);
6602*4882a593Smuzhiyun 
6603*4882a593Smuzhiyun 			memcpy(fru->ubuf + fru->offset, fru->dbuf_kva, len);
6604*4882a593Smuzhiyun 			fru->residue -= len;
6605*4882a593Smuzhiyun 			fru->offset += len;
6606*4882a593Smuzhiyun 
6607*4882a593Smuzhiyun 			if (fru->residue == 0) {
6608*4882a593Smuzhiyun 				fru->status = status;
6609*4882a593Smuzhiyun 				fru->op_busy = 0;
6610*4882a593Smuzhiyun 				if (fru->cbfn)
6611*4882a593Smuzhiyun 					fru->cbfn(fru->cbarg, fru->status);
6612*4882a593Smuzhiyun 			} else {
6613*4882a593Smuzhiyun 				if (msg->mh.msg_id == BFI_FRUVPD_I2H_READ_RSP)
6614*4882a593Smuzhiyun 					bfa_fru_read_send(fru,
6615*4882a593Smuzhiyun 						BFI_FRUVPD_H2I_READ_REQ);
6616*4882a593Smuzhiyun 				else
6617*4882a593Smuzhiyun 					bfa_fru_read_send(fru,
6618*4882a593Smuzhiyun 						BFI_TFRU_H2I_READ_REQ);
6619*4882a593Smuzhiyun 			}
6620*4882a593Smuzhiyun 		}
6621*4882a593Smuzhiyun 		break;
6622*4882a593Smuzhiyun 	default:
6623*4882a593Smuzhiyun 		WARN_ON(1);
6624*4882a593Smuzhiyun 	}
6625*4882a593Smuzhiyun }
6626*4882a593Smuzhiyun 
6627*4882a593Smuzhiyun /*
6628*4882a593Smuzhiyun  * register definitions
6629*4882a593Smuzhiyun  */
6630*4882a593Smuzhiyun #define FLI_CMD_REG			0x0001d000
6631*4882a593Smuzhiyun #define FLI_RDDATA_REG			0x0001d010
6632*4882a593Smuzhiyun #define FLI_ADDR_REG			0x0001d004
6633*4882a593Smuzhiyun #define FLI_DEV_STATUS_REG		0x0001d014
6634*4882a593Smuzhiyun 
6635*4882a593Smuzhiyun #define BFA_FLASH_FIFO_SIZE		128	/* fifo size */
6636*4882a593Smuzhiyun #define BFA_FLASH_CHECK_MAX		10000	/* max # of status check */
6637*4882a593Smuzhiyun #define BFA_FLASH_BLOCKING_OP_MAX	1000000	/* max # of blocking op check */
6638*4882a593Smuzhiyun #define BFA_FLASH_WIP_MASK		0x01	/* write in progress bit mask */
6639*4882a593Smuzhiyun 
6640*4882a593Smuzhiyun enum bfa_flash_cmd {
6641*4882a593Smuzhiyun 	BFA_FLASH_FAST_READ	= 0x0b,	/* fast read */
6642*4882a593Smuzhiyun 	BFA_FLASH_READ_STATUS	= 0x05,	/* read status */
6643*4882a593Smuzhiyun };
6644*4882a593Smuzhiyun 
6645*4882a593Smuzhiyun /*
6646*4882a593Smuzhiyun  * Hardware error definition
6647*4882a593Smuzhiyun  */
6648*4882a593Smuzhiyun enum bfa_flash_err {
6649*4882a593Smuzhiyun 	BFA_FLASH_NOT_PRESENT	= -1,	/*!< flash not present */
6650*4882a593Smuzhiyun 	BFA_FLASH_UNINIT	= -2,	/*!< flash not initialized */
6651*4882a593Smuzhiyun 	BFA_FLASH_BAD		= -3,	/*!< flash bad */
6652*4882a593Smuzhiyun 	BFA_FLASH_BUSY		= -4,	/*!< flash busy */
6653*4882a593Smuzhiyun 	BFA_FLASH_ERR_CMD_ACT	= -5,	/*!< command active never cleared */
6654*4882a593Smuzhiyun 	BFA_FLASH_ERR_FIFO_CNT	= -6,	/*!< fifo count never cleared */
6655*4882a593Smuzhiyun 	BFA_FLASH_ERR_WIP	= -7,	/*!< write-in-progress never cleared */
6656*4882a593Smuzhiyun 	BFA_FLASH_ERR_TIMEOUT	= -8,	/*!< fli timeout */
6657*4882a593Smuzhiyun 	BFA_FLASH_ERR_LEN	= -9,	/*!< invalid length */
6658*4882a593Smuzhiyun };
6659*4882a593Smuzhiyun 
6660*4882a593Smuzhiyun /*
6661*4882a593Smuzhiyun  * Flash command register data structure
6662*4882a593Smuzhiyun  */
6663*4882a593Smuzhiyun union bfa_flash_cmd_reg_u {
6664*4882a593Smuzhiyun 	struct {
6665*4882a593Smuzhiyun #ifdef __BIG_ENDIAN
6666*4882a593Smuzhiyun 		u32	act:1;
6667*4882a593Smuzhiyun 		u32	rsv:1;
6668*4882a593Smuzhiyun 		u32	write_cnt:9;
6669*4882a593Smuzhiyun 		u32	read_cnt:9;
6670*4882a593Smuzhiyun 		u32	addr_cnt:4;
6671*4882a593Smuzhiyun 		u32	cmd:8;
6672*4882a593Smuzhiyun #else
6673*4882a593Smuzhiyun 		u32	cmd:8;
6674*4882a593Smuzhiyun 		u32	addr_cnt:4;
6675*4882a593Smuzhiyun 		u32	read_cnt:9;
6676*4882a593Smuzhiyun 		u32	write_cnt:9;
6677*4882a593Smuzhiyun 		u32	rsv:1;
6678*4882a593Smuzhiyun 		u32	act:1;
6679*4882a593Smuzhiyun #endif
6680*4882a593Smuzhiyun 	} r;
6681*4882a593Smuzhiyun 	u32	i;
6682*4882a593Smuzhiyun };
6683*4882a593Smuzhiyun 
6684*4882a593Smuzhiyun /*
6685*4882a593Smuzhiyun  * Flash device status register data structure
6686*4882a593Smuzhiyun  */
6687*4882a593Smuzhiyun union bfa_flash_dev_status_reg_u {
6688*4882a593Smuzhiyun 	struct {
6689*4882a593Smuzhiyun #ifdef __BIG_ENDIAN
6690*4882a593Smuzhiyun 		u32	rsv:21;
6691*4882a593Smuzhiyun 		u32	fifo_cnt:6;
6692*4882a593Smuzhiyun 		u32	busy:1;
6693*4882a593Smuzhiyun 		u32	init_status:1;
6694*4882a593Smuzhiyun 		u32	present:1;
6695*4882a593Smuzhiyun 		u32	bad:1;
6696*4882a593Smuzhiyun 		u32	good:1;
6697*4882a593Smuzhiyun #else
6698*4882a593Smuzhiyun 		u32	good:1;
6699*4882a593Smuzhiyun 		u32	bad:1;
6700*4882a593Smuzhiyun 		u32	present:1;
6701*4882a593Smuzhiyun 		u32	init_status:1;
6702*4882a593Smuzhiyun 		u32	busy:1;
6703*4882a593Smuzhiyun 		u32	fifo_cnt:6;
6704*4882a593Smuzhiyun 		u32	rsv:21;
6705*4882a593Smuzhiyun #endif
6706*4882a593Smuzhiyun 	} r;
6707*4882a593Smuzhiyun 	u32	i;
6708*4882a593Smuzhiyun };
6709*4882a593Smuzhiyun 
6710*4882a593Smuzhiyun /*
6711*4882a593Smuzhiyun  * Flash address register data structure
6712*4882a593Smuzhiyun  */
6713*4882a593Smuzhiyun union bfa_flash_addr_reg_u {
6714*4882a593Smuzhiyun 	struct {
6715*4882a593Smuzhiyun #ifdef __BIG_ENDIAN
6716*4882a593Smuzhiyun 		u32	addr:24;
6717*4882a593Smuzhiyun 		u32	dummy:8;
6718*4882a593Smuzhiyun #else
6719*4882a593Smuzhiyun 		u32	dummy:8;
6720*4882a593Smuzhiyun 		u32	addr:24;
6721*4882a593Smuzhiyun #endif
6722*4882a593Smuzhiyun 	} r;
6723*4882a593Smuzhiyun 	u32	i;
6724*4882a593Smuzhiyun };
6725*4882a593Smuzhiyun 
6726*4882a593Smuzhiyun /*
6727*4882a593Smuzhiyun  * dg flash_raw_private Flash raw private functions
6728*4882a593Smuzhiyun  */
6729*4882a593Smuzhiyun static void
bfa_flash_set_cmd(void __iomem * pci_bar,u8 wr_cnt,u8 rd_cnt,u8 ad_cnt,u8 op)6730*4882a593Smuzhiyun bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
6731*4882a593Smuzhiyun 		  u8 rd_cnt, u8 ad_cnt, u8 op)
6732*4882a593Smuzhiyun {
6733*4882a593Smuzhiyun 	union bfa_flash_cmd_reg_u cmd;
6734*4882a593Smuzhiyun 
6735*4882a593Smuzhiyun 	cmd.i = 0;
6736*4882a593Smuzhiyun 	cmd.r.act = 1;
6737*4882a593Smuzhiyun 	cmd.r.write_cnt = wr_cnt;
6738*4882a593Smuzhiyun 	cmd.r.read_cnt = rd_cnt;
6739*4882a593Smuzhiyun 	cmd.r.addr_cnt = ad_cnt;
6740*4882a593Smuzhiyun 	cmd.r.cmd = op;
6741*4882a593Smuzhiyun 	writel(cmd.i, (pci_bar + FLI_CMD_REG));
6742*4882a593Smuzhiyun }
6743*4882a593Smuzhiyun 
6744*4882a593Smuzhiyun static void
bfa_flash_set_addr(void __iomem * pci_bar,u32 address)6745*4882a593Smuzhiyun bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
6746*4882a593Smuzhiyun {
6747*4882a593Smuzhiyun 	union bfa_flash_addr_reg_u addr;
6748*4882a593Smuzhiyun 
6749*4882a593Smuzhiyun 	addr.r.addr = address & 0x00ffffff;
6750*4882a593Smuzhiyun 	addr.r.dummy = 0;
6751*4882a593Smuzhiyun 	writel(addr.i, (pci_bar + FLI_ADDR_REG));
6752*4882a593Smuzhiyun }
6753*4882a593Smuzhiyun 
6754*4882a593Smuzhiyun static int
bfa_flash_cmd_act_check(void __iomem * pci_bar)6755*4882a593Smuzhiyun bfa_flash_cmd_act_check(void __iomem *pci_bar)
6756*4882a593Smuzhiyun {
6757*4882a593Smuzhiyun 	union bfa_flash_cmd_reg_u cmd;
6758*4882a593Smuzhiyun 
6759*4882a593Smuzhiyun 	cmd.i = readl(pci_bar + FLI_CMD_REG);
6760*4882a593Smuzhiyun 
6761*4882a593Smuzhiyun 	if (cmd.r.act)
6762*4882a593Smuzhiyun 		return BFA_FLASH_ERR_CMD_ACT;
6763*4882a593Smuzhiyun 
6764*4882a593Smuzhiyun 	return 0;
6765*4882a593Smuzhiyun }
6766*4882a593Smuzhiyun 
6767*4882a593Smuzhiyun /*
6768*4882a593Smuzhiyun  * @brief
6769*4882a593Smuzhiyun  * Flush FLI data fifo.
6770*4882a593Smuzhiyun  *
6771*4882a593Smuzhiyun  * @param[in] pci_bar - pci bar address
6772*4882a593Smuzhiyun  * @param[in] dev_status - device status
6773*4882a593Smuzhiyun  *
6774*4882a593Smuzhiyun  * Return 0 on success, negative error number on error.
6775*4882a593Smuzhiyun  */
6776*4882a593Smuzhiyun static u32
bfa_flash_fifo_flush(void __iomem * pci_bar)6777*4882a593Smuzhiyun bfa_flash_fifo_flush(void __iomem *pci_bar)
6778*4882a593Smuzhiyun {
6779*4882a593Smuzhiyun 	u32 i;
6780*4882a593Smuzhiyun 	union bfa_flash_dev_status_reg_u dev_status;
6781*4882a593Smuzhiyun 
6782*4882a593Smuzhiyun 	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6783*4882a593Smuzhiyun 
6784*4882a593Smuzhiyun 	if (!dev_status.r.fifo_cnt)
6785*4882a593Smuzhiyun 		return 0;
6786*4882a593Smuzhiyun 
6787*4882a593Smuzhiyun 	/* fifo counter in terms of words */
6788*4882a593Smuzhiyun 	for (i = 0; i < dev_status.r.fifo_cnt; i++)
6789*4882a593Smuzhiyun 		readl(pci_bar + FLI_RDDATA_REG);
6790*4882a593Smuzhiyun 
6791*4882a593Smuzhiyun 	/*
6792*4882a593Smuzhiyun 	 * Check the device status. It may take some time.
6793*4882a593Smuzhiyun 	 */
6794*4882a593Smuzhiyun 	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
6795*4882a593Smuzhiyun 		dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6796*4882a593Smuzhiyun 		if (!dev_status.r.fifo_cnt)
6797*4882a593Smuzhiyun 			break;
6798*4882a593Smuzhiyun 	}
6799*4882a593Smuzhiyun 
6800*4882a593Smuzhiyun 	if (dev_status.r.fifo_cnt)
6801*4882a593Smuzhiyun 		return BFA_FLASH_ERR_FIFO_CNT;
6802*4882a593Smuzhiyun 
6803*4882a593Smuzhiyun 	return 0;
6804*4882a593Smuzhiyun }
6805*4882a593Smuzhiyun 
6806*4882a593Smuzhiyun /*
6807*4882a593Smuzhiyun  * @brief
6808*4882a593Smuzhiyun  * Read flash status.
6809*4882a593Smuzhiyun  *
6810*4882a593Smuzhiyun  * @param[in] pci_bar - pci bar address
6811*4882a593Smuzhiyun  *
6812*4882a593Smuzhiyun  * Return 0 on success, negative error number on error.
6813*4882a593Smuzhiyun */
6814*4882a593Smuzhiyun static u32
bfa_flash_status_read(void __iomem * pci_bar)6815*4882a593Smuzhiyun bfa_flash_status_read(void __iomem *pci_bar)
6816*4882a593Smuzhiyun {
6817*4882a593Smuzhiyun 	union bfa_flash_dev_status_reg_u	dev_status;
6818*4882a593Smuzhiyun 	int				status;
6819*4882a593Smuzhiyun 	u32			ret_status;
6820*4882a593Smuzhiyun 	int				i;
6821*4882a593Smuzhiyun 
6822*4882a593Smuzhiyun 	status = bfa_flash_fifo_flush(pci_bar);
6823*4882a593Smuzhiyun 	if (status < 0)
6824*4882a593Smuzhiyun 		return status;
6825*4882a593Smuzhiyun 
6826*4882a593Smuzhiyun 	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
6827*4882a593Smuzhiyun 
6828*4882a593Smuzhiyun 	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
6829*4882a593Smuzhiyun 		status = bfa_flash_cmd_act_check(pci_bar);
6830*4882a593Smuzhiyun 		if (!status)
6831*4882a593Smuzhiyun 			break;
6832*4882a593Smuzhiyun 	}
6833*4882a593Smuzhiyun 
6834*4882a593Smuzhiyun 	if (status)
6835*4882a593Smuzhiyun 		return status;
6836*4882a593Smuzhiyun 
6837*4882a593Smuzhiyun 	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
6838*4882a593Smuzhiyun 	if (!dev_status.r.fifo_cnt)
6839*4882a593Smuzhiyun 		return BFA_FLASH_BUSY;
6840*4882a593Smuzhiyun 
6841*4882a593Smuzhiyun 	ret_status = readl(pci_bar + FLI_RDDATA_REG);
6842*4882a593Smuzhiyun 	ret_status >>= 24;
6843*4882a593Smuzhiyun 
6844*4882a593Smuzhiyun 	status = bfa_flash_fifo_flush(pci_bar);
6845*4882a593Smuzhiyun 	if (status < 0)
6846*4882a593Smuzhiyun 		return status;
6847*4882a593Smuzhiyun 
6848*4882a593Smuzhiyun 	return ret_status;
6849*4882a593Smuzhiyun }
6850*4882a593Smuzhiyun 
6851*4882a593Smuzhiyun /*
6852*4882a593Smuzhiyun  * @brief
6853*4882a593Smuzhiyun  * Start flash read operation.
6854*4882a593Smuzhiyun  *
6855*4882a593Smuzhiyun  * @param[in] pci_bar - pci bar address
6856*4882a593Smuzhiyun  * @param[in] offset - flash address offset
6857*4882a593Smuzhiyun  * @param[in] len - read data length
6858*4882a593Smuzhiyun  * @param[in] buf - read data buffer
6859*4882a593Smuzhiyun  *
6860*4882a593Smuzhiyun  * Return 0 on success, negative error number on error.
6861*4882a593Smuzhiyun  */
6862*4882a593Smuzhiyun static u32
bfa_flash_read_start(void __iomem * pci_bar,u32 offset,u32 len,char * buf)6863*4882a593Smuzhiyun bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
6864*4882a593Smuzhiyun 			 char *buf)
6865*4882a593Smuzhiyun {
6866*4882a593Smuzhiyun 	int status;
6867*4882a593Smuzhiyun 
6868*4882a593Smuzhiyun 	/*
6869*4882a593Smuzhiyun 	 * len must be mutiple of 4 and not exceeding fifo size
6870*4882a593Smuzhiyun 	 */
6871*4882a593Smuzhiyun 	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
6872*4882a593Smuzhiyun 		return BFA_FLASH_ERR_LEN;
6873*4882a593Smuzhiyun 
6874*4882a593Smuzhiyun 	/*
6875*4882a593Smuzhiyun 	 * check status
6876*4882a593Smuzhiyun 	 */
6877*4882a593Smuzhiyun 	status = bfa_flash_status_read(pci_bar);
6878*4882a593Smuzhiyun 	if (status == BFA_FLASH_BUSY)
6879*4882a593Smuzhiyun 		status = bfa_flash_status_read(pci_bar);
6880*4882a593Smuzhiyun 
6881*4882a593Smuzhiyun 	if (status < 0)
6882*4882a593Smuzhiyun 		return status;
6883*4882a593Smuzhiyun 
6884*4882a593Smuzhiyun 	/*
6885*4882a593Smuzhiyun 	 * check if write-in-progress bit is cleared
6886*4882a593Smuzhiyun 	 */
6887*4882a593Smuzhiyun 	if (status & BFA_FLASH_WIP_MASK)
6888*4882a593Smuzhiyun 		return BFA_FLASH_ERR_WIP;
6889*4882a593Smuzhiyun 
6890*4882a593Smuzhiyun 	bfa_flash_set_addr(pci_bar, offset);
6891*4882a593Smuzhiyun 
6892*4882a593Smuzhiyun 	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
6893*4882a593Smuzhiyun 
6894*4882a593Smuzhiyun 	return 0;
6895*4882a593Smuzhiyun }
6896*4882a593Smuzhiyun 
6897*4882a593Smuzhiyun /*
6898*4882a593Smuzhiyun  * @brief
6899*4882a593Smuzhiyun  * Check flash read operation.
6900*4882a593Smuzhiyun  *
6901*4882a593Smuzhiyun  * @param[in] pci_bar - pci bar address
6902*4882a593Smuzhiyun  *
6903*4882a593Smuzhiyun  * Return flash device status, 1 if busy, 0 if not.
6904*4882a593Smuzhiyun  */
6905*4882a593Smuzhiyun static u32
bfa_flash_read_check(void __iomem * pci_bar)6906*4882a593Smuzhiyun bfa_flash_read_check(void __iomem *pci_bar)
6907*4882a593Smuzhiyun {
6908*4882a593Smuzhiyun 	if (bfa_flash_cmd_act_check(pci_bar))
6909*4882a593Smuzhiyun 		return 1;
6910*4882a593Smuzhiyun 
6911*4882a593Smuzhiyun 	return 0;
6912*4882a593Smuzhiyun }
6913*4882a593Smuzhiyun 
6914*4882a593Smuzhiyun /*
6915*4882a593Smuzhiyun  * @brief
6916*4882a593Smuzhiyun  * End flash read operation.
6917*4882a593Smuzhiyun  *
6918*4882a593Smuzhiyun  * @param[in] pci_bar - pci bar address
6919*4882a593Smuzhiyun  * @param[in] len - read data length
6920*4882a593Smuzhiyun  * @param[in] buf - read data buffer
6921*4882a593Smuzhiyun  *
6922*4882a593Smuzhiyun  */
6923*4882a593Smuzhiyun static void
bfa_flash_read_end(void __iomem * pci_bar,u32 len,char * buf)6924*4882a593Smuzhiyun bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
6925*4882a593Smuzhiyun {
6926*4882a593Smuzhiyun 
6927*4882a593Smuzhiyun 	u32 i;
6928*4882a593Smuzhiyun 
6929*4882a593Smuzhiyun 	/*
6930*4882a593Smuzhiyun 	 * read data fifo up to 32 words
6931*4882a593Smuzhiyun 	 */
6932*4882a593Smuzhiyun 	for (i = 0; i < len; i += 4) {
6933*4882a593Smuzhiyun 		u32 w = readl(pci_bar + FLI_RDDATA_REG);
6934*4882a593Smuzhiyun 		*((u32 *) (buf + i)) = swab32(w);
6935*4882a593Smuzhiyun 	}
6936*4882a593Smuzhiyun 
6937*4882a593Smuzhiyun 	bfa_flash_fifo_flush(pci_bar);
6938*4882a593Smuzhiyun }
6939*4882a593Smuzhiyun 
6940*4882a593Smuzhiyun /*
6941*4882a593Smuzhiyun  * @brief
6942*4882a593Smuzhiyun  * Perform flash raw read.
6943*4882a593Smuzhiyun  *
6944*4882a593Smuzhiyun  * @param[in] pci_bar - pci bar address
6945*4882a593Smuzhiyun  * @param[in] offset - flash partition address offset
6946*4882a593Smuzhiyun  * @param[in] buf - read data buffer
6947*4882a593Smuzhiyun  * @param[in] len - read data length
6948*4882a593Smuzhiyun  *
6949*4882a593Smuzhiyun  * Return status.
6950*4882a593Smuzhiyun  */
6951*4882a593Smuzhiyun 
6952*4882a593Smuzhiyun 
6953*4882a593Smuzhiyun #define FLASH_BLOCKING_OP_MAX   500
6954*4882a593Smuzhiyun #define FLASH_SEM_LOCK_REG	0x18820
6955*4882a593Smuzhiyun 
6956*4882a593Smuzhiyun static int
bfa_raw_sem_get(void __iomem * bar)6957*4882a593Smuzhiyun bfa_raw_sem_get(void __iomem *bar)
6958*4882a593Smuzhiyun {
6959*4882a593Smuzhiyun 	int	locked;
6960*4882a593Smuzhiyun 
6961*4882a593Smuzhiyun 	locked = readl((bar + FLASH_SEM_LOCK_REG));
6962*4882a593Smuzhiyun 	return !locked;
6963*4882a593Smuzhiyun 
6964*4882a593Smuzhiyun }
6965*4882a593Smuzhiyun 
6966*4882a593Smuzhiyun static bfa_status_t
bfa_flash_sem_get(void __iomem * bar)6967*4882a593Smuzhiyun bfa_flash_sem_get(void __iomem *bar)
6968*4882a593Smuzhiyun {
6969*4882a593Smuzhiyun 	u32 n = FLASH_BLOCKING_OP_MAX;
6970*4882a593Smuzhiyun 
6971*4882a593Smuzhiyun 	while (!bfa_raw_sem_get(bar)) {
6972*4882a593Smuzhiyun 		if (--n <= 0)
6973*4882a593Smuzhiyun 			return BFA_STATUS_BADFLASH;
6974*4882a593Smuzhiyun 		mdelay(10);
6975*4882a593Smuzhiyun 	}
6976*4882a593Smuzhiyun 	return BFA_STATUS_OK;
6977*4882a593Smuzhiyun }
6978*4882a593Smuzhiyun 
6979*4882a593Smuzhiyun static void
bfa_flash_sem_put(void __iomem * bar)6980*4882a593Smuzhiyun bfa_flash_sem_put(void __iomem *bar)
6981*4882a593Smuzhiyun {
6982*4882a593Smuzhiyun 	writel(0, (bar + FLASH_SEM_LOCK_REG));
6983*4882a593Smuzhiyun }
6984*4882a593Smuzhiyun 
6985*4882a593Smuzhiyun bfa_status_t
bfa_flash_raw_read(void __iomem * pci_bar,u32 offset,char * buf,u32 len)6986*4882a593Smuzhiyun bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
6987*4882a593Smuzhiyun 		       u32 len)
6988*4882a593Smuzhiyun {
6989*4882a593Smuzhiyun 	u32 n;
6990*4882a593Smuzhiyun 	int status;
6991*4882a593Smuzhiyun 	u32 off, l, s, residue, fifo_sz;
6992*4882a593Smuzhiyun 
6993*4882a593Smuzhiyun 	residue = len;
6994*4882a593Smuzhiyun 	off = 0;
6995*4882a593Smuzhiyun 	fifo_sz = BFA_FLASH_FIFO_SIZE;
6996*4882a593Smuzhiyun 	status = bfa_flash_sem_get(pci_bar);
6997*4882a593Smuzhiyun 	if (status != BFA_STATUS_OK)
6998*4882a593Smuzhiyun 		return status;
6999*4882a593Smuzhiyun 
7000*4882a593Smuzhiyun 	while (residue) {
7001*4882a593Smuzhiyun 		s = offset + off;
7002*4882a593Smuzhiyun 		n = s / fifo_sz;
7003*4882a593Smuzhiyun 		l = (n + 1) * fifo_sz - s;
7004*4882a593Smuzhiyun 		if (l > residue)
7005*4882a593Smuzhiyun 			l = residue;
7006*4882a593Smuzhiyun 
7007*4882a593Smuzhiyun 		status = bfa_flash_read_start(pci_bar, offset + off, l,
7008*4882a593Smuzhiyun 								&buf[off]);
7009*4882a593Smuzhiyun 		if (status < 0) {
7010*4882a593Smuzhiyun 			bfa_flash_sem_put(pci_bar);
7011*4882a593Smuzhiyun 			return BFA_STATUS_FAILED;
7012*4882a593Smuzhiyun 		}
7013*4882a593Smuzhiyun 
7014*4882a593Smuzhiyun 		n = BFA_FLASH_BLOCKING_OP_MAX;
7015*4882a593Smuzhiyun 		while (bfa_flash_read_check(pci_bar)) {
7016*4882a593Smuzhiyun 			if (--n <= 0) {
7017*4882a593Smuzhiyun 				bfa_flash_sem_put(pci_bar);
7018*4882a593Smuzhiyun 				return BFA_STATUS_FAILED;
7019*4882a593Smuzhiyun 			}
7020*4882a593Smuzhiyun 		}
7021*4882a593Smuzhiyun 
7022*4882a593Smuzhiyun 		bfa_flash_read_end(pci_bar, l, &buf[off]);
7023*4882a593Smuzhiyun 
7024*4882a593Smuzhiyun 		residue -= l;
7025*4882a593Smuzhiyun 		off += l;
7026*4882a593Smuzhiyun 	}
7027*4882a593Smuzhiyun 	bfa_flash_sem_put(pci_bar);
7028*4882a593Smuzhiyun 
7029*4882a593Smuzhiyun 	return BFA_STATUS_OK;
7030*4882a593Smuzhiyun }
7031