1 /*
2 * Misc utility routines for accessing chip-specific features
3 * of the BOOKER NCI (non coherent interconnect) based Broadcom chips.
4 *
5 * Broadcom Proprietary and Confidential. Copyright (C) 2020,
6 * All Rights Reserved.
7 *
8 * This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom;
9 * the contents of this file may not be disclosed to third parties,
10 * copied or duplicated in any form, in whole or in part, without
11 * the prior written permission of Broadcom.
12 *
13 *
14 * <<Broadcom-WL-IPTag/Proprietary:>>
15 */
16
17 #include <typedefs.h>
18 #include <hndsoc.h>
19 #include <sbchipc.h>
20 #include <pcicfg.h>
21 #include <pcie_core.h>
22 #include "siutils_priv.h"
23 #include <nci.h>
24 #include <bcmdevs.h>
25 #include <hndoobr.h>
26
27 #define NCI_BAD_REG 0xbbadd000u /* Bad Register Address */
28 #define NCI_BAD_INDEX -1 /* Bad Index */
29
30 #define OOBR_BASE_MASK 0x00001FFFu /* Mask to get Base address of OOBR */
31 #define EROM1_BASE_MASK 0x00000FFFu /* Mask to get Base address of EROM1 */
32
33 /* Core Info */
34 #define COREINFO_COREID_MASK 0x00000FFFu /* Bit-11 to 0 */
35 #define COREINFO_REV_MASK 0x000FF000u /* Core Rev Mask */
36 #define COREINFO_REV_SHIFT 12u /* Bit-12 */
37 #define COREINFO_MFG_MASK 0x00F00000u /* Core Mfg Mask */
38 #define COREINFO_MFG_SHIFT 20u /* Bit-20 */
39 #define COREINFO_BPID_MASK 0x07000000u /* 26-24 Gives Backplane ID */
40 #define COREINFO_BPID_SHIFT 24u /* Bit:26-24 */
41 #define COREINFO_ISBP_MASK 0x08000000u /* Is Backplane or Bridge */
42 #define COREINFO_ISBP_SHIFT 27u /* Bit:27 */
43
44 /* Interface Config */
45 #define IC_IFACECNT_MASK 0x0000F000u /* No of Interface Descriptor Mask */
46 #define IC_IFACECNT_SHIFT 12u /* Bit-12 */
47 #define IC_IFACEOFFSET_MASK 0x00000FFFu /* OFFSET for 1st Interface Descriptor */
48
49 /* DMP Reg Offset */
50 #define DMP_DMPCTRL_REG_OFFSET 8u
51
52 /* Interface Descriptor Masks */
53 #define ID_NODEPTR_MASK 0xFFFFFFF8u /* Master/Slave Network Interface Addr */
54 #define ID_NODETYPE_MASK 0x00000007u /* 0:Booker 1:IDM 1-0xf:Reserved */
55 #define ID_WORDOFFSET_MASK 0xF0000000u /* WordOffset to next Iface Desc in EROM2 */
56 #define ID_WORDOFFSET_SHIFT 28u /* WordOffset bits 31-28 */
57 #define ID_CORETYPE_MASK 0x08000000u /* CORE belongs to OOBR(0) or EROM(1) */
58 #define ID_CORETYPE_SHIFT 27u /* Bit-27 */
59 #define ID_MI_MASK 0x04000000u /* 0: Slave Interface, 1:Master Interface */
60 #define ID_MI_SHIFT 26u /* Bit-26 */
61 #define ID_NADDR_MASK 0x03000000u /* No of Slave Address Regions */
62 #define ID_NADDR_SHIFT 24u /* Bit:25-24 */
63 #define ID_BPID_MASK 0x00F00000u /* Give Backplane ID */
64 #define ID_BPID_SHIFT 20u /* Bit:20-23 */
65 #define ID_COREINFOPTR_MASK 0x00001FFFu /* OOBR or EROM Offset */
66 #define ID_ENDMARKER 0xFFFFFFFFu /* End of EROM Part 2 */
67
68 /* Slave Port Address Descriptor Masks */
69 #define SLAVEPORT_BASE_ADDR_MASK 0xFFFFFF00u /* Bits 31:8 is the base address */
70 #define SLAVEPORT_BOUND_ADDR_MASK 0x00000040u /* Addr is not 2^n and with bound addr */
71 #define SLAVEPORT_BOUND_ADDR_SHIFT 6u /* Bit-6 */
72 #define SLAVEPORT_64BIT_ADDR_MASK 0x00000020u /* 64-bit base and bound fields */
73 #define SLAVEPORT_64BIT_ADDR_SHIFT 5u /* Bit-5 */
74 #define SLAVEPORT_ADDR_SIZE_MASK 0x0000001Fu /* Address Size mask */
75 #define SLAVEPORT_ADDR_TYPE_BOUND 0x1u /* Bound Addr */
76 #define SLAVEPORT_ADDR_TYPE_64 0x2u /* 64-Bit Addr */
77 #define SLAVEPORT_ADDR_MIN_SHIFT 0x8u
78 /* Address space Size of the slave port */
79 #define SLAVEPORT_ADDR_SIZE(adesc) (1u << ((adesc & SLAVEPORT_ADDR_SIZE_MASK) + \
80 SLAVEPORT_ADDR_MIN_SHIFT))
81
82 #define GET_NEXT_EROM_ADDR(addr) ((uint32*)((uintptr)(addr) + 4u))
83
84 #define NCI_DEFAULT_CORE_UNIT (0u)
85
86 /* Error Codes */
87 enum {
88 NCI_OK = 0,
89 NCI_BACKPLANE_ID_MISMATCH = -1,
90 NCI_INVALID_EROM2PTR = -2,
91 NCI_WORDOFFSET_MISMATCH = -3,
92 NCI_NOMEM = -4,
93 NCI_MASTER_INVALID_ADDR = -5
94 };
95
96 #define GET_OOBR_BASE(erom2base) ((erom2base) & ~OOBR_BASE_MASK)
97 #define GET_EROM1_BASE(erom2base) ((erom2base) & ~EROM1_BASE_MASK)
98 #define CORE_ID(core_info) ((core_info) & COREINFO_COREID_MASK)
99 #define GET_INFACECNT(iface_cfg) (((iface_cfg) & IC_IFACECNT_MASK) >> IC_IFACECNT_SHIFT)
100 #define GET_NODEPTR(iface_desc_0) ((iface_desc_0) & ID_NODEPTR_MASK)
101 #define GET_NODETYPE(iface_desc_0) ((iface_desc_0) & ID_NODETYPE_MASK)
102 #define GET_WORDOFFSET(iface_desc_1) (((iface_desc_1) & ID_WORDOFFSET_MASK) \
103 >> ID_WORDOFFSET_SHIFT)
104 #define IS_MASTER(iface_desc_1) (((iface_desc_1) & ID_MI_MASK) >> ID_MI_SHIFT)
105 #define GET_CORETYPE(iface_desc_1) (((iface_desc_1) & ID_CORETYPE_MASK) >> ID_CORETYPE_SHIFT)
106 #define GET_NUM_ADDR_REG(iface_desc_1) (((iface_desc_1) & ID_NADDR_MASK) >> ID_NADDR_SHIFT)
107 #define GET_COREOFFSET(iface_desc_1) ((iface_desc_1) & ID_COREINFOPTR_MASK)
108 #define ADDR_SIZE(sz) ((1u << ((sz) + 8u)) - 1u)
109
/* Fully parenthesized so the expansion is safe inside larger expressions:
 * without the outer parens, `CORE_REV(x) + 1` would parse as
 * `((x) & MASK) >> (SHIFT + 1)` because `+` binds tighter than `>>`.
 */
#define CORE_REV(core_info)	(((core_info) & COREINFO_REV_MASK) >> COREINFO_REV_SHIFT)
#define CORE_MFG(core_info)	(((core_info) & COREINFO_MFG_MASK) >> COREINFO_MFG_SHIFT)
112 #define COREINFO_BPID(core_info) (((core_info) & COREINFO_BPID_MASK) >> COREINFO_BPID_SHIFT)
113 #define IS_BACKPLANE(core_info) (((core_info) & COREINFO_ISBP_MASK) >> COREINFO_ISBP_SHIFT)
114 #define ID_BPID(iface_desc_1) (((iface_desc_1) & ID_BPID_MASK) >> ID_BPID_SHIFT)
115 #define IS_BACKPLANE_ID_SAME(core_info, iface_desc_1) \
116 (COREINFO_BPID((core_info)) == ID_BPID((iface_desc_1)))
117
118 #define NCI_WORD_SIZE (4u)
119 #define PCI_ACCESS_SIZE (4u)
120
121 #define NCI_ADDR2NUM(addr) ((uintptr)(addr))
122 #define NCI_ADD_NUM(addr, size) (NCI_ADDR2NUM(addr) + (size))
123 #ifdef DONGLEBUILD
124 #define NCI_ADD_ADDR(addr, size) ((uint32*)REG_MAP(NCI_ADD_NUM((addr), (size)), 0u))
125 #else /* !DONGLEBUILD */
126 #define NCI_ADD_ADDR(addr, size) ((uint32*)(NCI_ADD_NUM((addr), (size))))
127 #endif /* DONGLEBUILD */
128 #define NCI_INC_ADDR(addr, size) ((addr) = NCI_ADD_ADDR((addr), (size)))
129
130 #define NODE_TYPE_BOOKER 0x0u
131 #define NODE_TYPE_NIC400 0x1u
132
133 #define BP_BOOKER 0x0u
134 #define BP_NIC400 0x1u
135 #define BP_APB1 0x2u
136 #define BP_APB2 0x3u
137 #define BP_CCI400 0x4u
138
139 #define PCIE_WRITE_SIZE 4u
140
/* Printable backplane names; indices match the BP_BOOKER..BP_CCI400 values */
static const char BACKPLANE_ID_NAME[][11] = {
	"BOOKER",
	"NIC400",
	"APB1",
	"APB2",
	"CCI400",
	"\0"	/* terminator entry */
};
149
150 #define APB_INF(ifd) ((ID_BPID((ifd).iface_desc_1) == BP_APB1) || \
151 (ID_BPID((ifd).iface_desc_1) == BP_APB2))
152 #define BOOKER_INF(ifd) (ID_BPID((ifd).iface_desc_1) == BP_BOOKER)
153 #define NIC_INF(ifd) (ID_BPID((ifd).iface_desc_1) == BP_NIC400)
154
155 /* BOOKER NCI LOG LEVEL */
156 #define NCI_LOG_LEVEL_ERROR 0x1u
157 #define NCI_LOG_LEVEL_TRACE 0x2u
158 #define NCI_LOG_LEVEL_INFO 0x4u
159 #define NCI_LOG_LEVEL_PRINT 0x8u
160
161 #ifndef NCI_DEFAULT_LOG_LEVEL
162 #define NCI_DEFAULT_LOG_LEVEL (NCI_LOG_LEVEL_ERROR)
163 #endif /* NCI_DEFAULT_LOG_LEVEL */
164
165 uint32 nci_log_level = NCI_DEFAULT_LOG_LEVEL;
166
167 #ifdef DONGLEBUILD
168 #define NCI_ERROR(args) do { if (nci_log_level & NCI_LOG_LEVEL_ERROR) { printf args; } } while (0u)
169 #define NCI_TRACE(args) do { if (nci_log_level & NCI_LOG_LEVEL_TRACE) { printf args; } } while (0u)
170 #define NCI_INFO(args) do { if (nci_log_level & NCI_LOG_LEVEL_INFO) { printf args; } } while (0u)
171 #define NCI_PRINT(args) do { if (nci_log_level & NCI_LOG_LEVEL_PRINT) { printf args; } } while (0u)
172 #else /* !DONGLEBUILD */
173 #define NCI_KERN_PRINT(...) printk(KERN_ERR __VA_ARGS__)
174 #define NCI_ERROR(args) do { if (nci_log_level & NCI_LOG_LEVEL_ERROR) \
175 { NCI_KERN_PRINT args; } } while (0u)
176 #define NCI_TRACE(args) do { if (nci_log_level & NCI_LOG_LEVEL_TRACE) \
177 { NCI_KERN_PRINT args; } } while (0u)
178 #define NCI_INFO(args) do { if (nci_log_level & NCI_LOG_LEVEL_INFO) \
179 { NCI_KERN_PRINT args; } } while (0u)
180 #define NCI_PRINT(args) do { if (nci_log_level & NCI_LOG_LEVEL_PRINT) \
181 { NCI_KERN_PRINT args; } } while (0u)
182 #endif /* DONGLEBUILD */
183
184 #define NCI_EROM_WORD_SIZEOF 4u
185 #define NCI_REGS_PER_CORE 2u
186
187 #define NCI_EROM1_LEN(erom2base) (erom2base - GET_EROM1_BASE(erom2base))
188 #define NCI_NONOOBR_CORES(erom2base) NCI_EROM1_LEN(erom2base) \
189 /(NCI_REGS_PER_CORE * NCI_EROM_WORD_SIZEOF)
190
191 /* AXI ID to CoreID + unit mappings */
typedef struct nci_axi_to_coreidx {
	uint coreid;	/**< core id the AXI id maps to */
	uint coreunit;	/**< unit number distinguishing duplicate core ids */
} nci_axi_to_coreidx_t;
196
/* 4397 mapping table; the row index is the AXI id (see per-row comments) */
static const nci_axi_to_coreidx_t axi2coreidx_4397[] = {
	{CC_CORE_ID, 0},	/* 00 Chipcommon */
	{PCIE2_CORE_ID, 0},	/* 01 PCIe */
	{D11_CORE_ID, 0},	/* 02 D11 Main */
	{ARMCR4_CORE_ID, 0},	/* 03 ARM */
	{BT_CORE_ID, 0},	/* 04 BT AHB */
	{D11_CORE_ID, 1},	/* 05 D11 Aux */
	{D11_CORE_ID, 0},	/* 06 D11 Main l1 */
	{D11_CORE_ID, 1},	/* 07 D11 Aux l1 */
	{D11_CORE_ID, 0},	/* 08 D11 Main l2 */
	{D11_CORE_ID, 1},	/* 09 D11 Aux l2 */
	{NODEV_CORE_ID, 0},	/* 10 M2M DMA */
	{NODEV_CORE_ID, 0},	/* 11 unused */
	{NODEV_CORE_ID, 0},	/* 12 unused */
	{NODEV_CORE_ID, 0},	/* 13 unused */
	{NODEV_CORE_ID, 0},	/* 14 unused */
	{NODEV_CORE_ID, 0}	/* 15 unused */
};
215
/* One decoded slave-port address region (see nci_save_slaveport_addr). */
typedef struct slave_port {
	uint32 adesc;	/**< Address Descriptor 0 */
	uint32 addrl;	/**< Lower Base */
	uint32 addrh;	/**< Upper Base */
	uint32 extaddrl;	/**< Lower Bound */
	uint32 extaddrh;	/**< Upper Bound */
} slave_port_t;
223
/* Parsed EROM descriptor for one master/slave interface of a core. */
typedef struct interface_desc {
	slave_port_t *sp;	/**< Slave Port Addr 0-3 (allocated only for slaves) */

	uint32 iface_desc_0;	/**< Interface-0 Descriptor Word0 */
	/* If Node Type 0-Booker xMNI/xSNI address. If Node Type 1-DMP wrapper Address */
	uint32 node_ptr;	/**< Core's Node pointer */

	uint32 iface_desc_1;	/**< Interface Descriptor Word1 */
	uint8 num_addr_reg;	/**< Number of Slave Port Addr (Valid only if master=0) */
	uint8 coretype;	/**< Core Belongs to 0:OOBR 1:Without OOBR */
	uint8 master;	/**< 1:Master 0:Slave */

	uint8 node_type;	/**< 0:Booker , 1:IDM Wrapper, 2-0xf: Reserved */
} interface_desc_t;
238
/* Per-core state collected while scanning the EROM. */
typedef struct nci_cores {
	void *regs;	/* mapped core register base -- presumably filled by setcoreidx; verify */
	/* 2:0-Node type (0-booker,1-IDM Wrapper) 31:3-Interconnect register space */
	interface_desc_t *desc;	/**< Interface & Address Descriptors */
	/*
	 * 11:0-CoreID, 19:12-RevID 23:20-MFG 26:24-Backplane ID if
	 * bit 27 is 1 (Core is Backplane or Bridge)
	 */
	uint32 coreinfo;	/**< CoreInfo of each core */
	/*
	 * 11:0 - Offset of 1st Interface desc in EROM; 15:12 - No.
	 * of interfaces attached to this core
	 */
	uint32 iface_cfg;	/**< Interface config Reg */
	uint32 dmp_regs_off;	/**< DMP control & DMP status @ 0x48 from coreinfo */
	uint32 coreid;	/**< id of each core */
	uint8 coreunit;	/**< Unit differentiate same coreids */
	uint8 iface_cnt;	/**< no of Interface connected to each core */
	uint8 PAD[2u];
} nci_cores_t;
259
/* Top-level NCI/EROM scan state shared by all nci_* routines. */
typedef struct nci_info {
	void *osh;	/**< osl os handle */
	nci_cores_t *cores;	/**< Cores Parsed */
	void *pci_bar_addr;	/**< PCI BAR0 Window */
	uint32 cc_erom2base;	/**< Base of EROM2 from ChipCommon */
	uint32 *erom1base;	/**< Base of EROM1 */
	uint32 *erom2base;	/**< Base of EROM2 */
	uint32 *oobr_base;	/**< Base of OOBR */
	uint16 bustype;	/**< SI_BUS, PCI_BUS */
	uint8 max_cores;	/**< # Max cores indicated by Register */
	uint8 num_cores;	/**< # discovered cores */
	uint8 refcnt;	/**< Allocation reference count */
	uint8 scan_done;	/**< Set to TRUE when erom scan is done. */
	uint8 PAD[2];
} nci_info_t;
275
276 #define NI_IDM_RESET_ENTRY 0x1
277 #define NI_IDM_RESET_EXIT 0x0
278
279 /* AXI Slave Network Interface registers */
typedef volatile struct asni_regs {
	/* Field offsets are fixed by hardware; PAD arrays preserve the layout. */
	uint32 node_type;	/* 0x000 */
	uint32 node_info;	/* 0x004 */
	uint32 secr_acc;	/* 0x008 */
	uint32 pmusela;	/* 0x00c */
	uint32 pmuselb;	/* 0x010 */
	uint32 PAD[11];	/* 0x014..0x03c reserved */
	uint32 node_feat;	/* 0x040 */
	uint32 bursplt;	/* 0x044 */
	uint32 addr_remap;	/* 0x048 */
	uint32 PAD[13];	/* 0x04c..0x07c reserved */
	uint32 sildbg;	/* 0x080 */
	uint32 qosctl;	/* 0x084 */
	uint32 wdatthrs;	/* 0x088 */
	uint32 arqosovr;	/* 0x08c */
	uint32 awqosovr;	/* 0x090 */
	uint32 atqosot;	/* 0x094 */
	uint32 arqosot;	/* 0x098 */
	uint32 awqosot;	/* 0x09c */
	uint32 axqosot;	/* 0x0a0 */
	uint32 qosrdpk;	/* 0x0a4 */
	uint32 qosrdbur;	/* 0x0a8 */
	uint32 qosrdavg;	/* 0x0ac */
	uint32 qoswrpk;	/* 0x0b0 */
	uint32 qoswrbur;	/* 0x0b4 */
	uint32 qoswravg;	/* 0x0b8 */
	uint32 qoscompk;	/* 0x0bc */
	uint32 qoscombur;	/* 0x0c0 */
	uint32 qoscomavg;	/* 0x0c4 */
	uint32 qosrbbqv;	/* 0x0c8 */
	uint32 qoswrbqv;	/* 0x0cc */
	uint32 qoscombqv;	/* 0x0d0 */
	uint32 PAD[11];	/* 0x0d4..0x0fc reserved */
	uint32 idm_device_id;	/* 0x100 */
	uint32 PAD[15];	/* 0x104..0x13c reserved */
	uint32 idm_reset_ctrl;	/* 0x140 */
} asni_regs_t;
317
318 /* AXI Master Network Interface registers */
typedef volatile struct amni_regs {
	/* Field offsets are fixed by hardware; PAD arrays preserve the layout. */
	uint32 node_type;	/* 0x000 */
	uint32 node_info;	/* 0x004 */
	uint32 secr_acc;	/* 0x008 */
	uint32 pmusela;	/* 0x00c */
	uint32 pmuselb;	/* 0x010 */
	uint32 PAD[11];	/* 0x014..0x03c reserved */
	uint32 node_feat;	/* 0x040 */
	uint32 PAD[15];	/* 0x044..0x07c reserved */
	uint32 sildbg;	/* 0x080 */
	uint32 qosacc;	/* 0x084 */
	uint32 PAD[26];	/* 0x088..0x0ec reserved */
	uint32 interrupt_status;	/* 0x0f0 */
	uint32 interrupt_mask;	/* 0x0f4 */
	uint32 interrupt_status_ns;	/* 0x0f8 */
	uint32 interrupt_mask_ns;	/* 0x0FC */
	uint32 idm_device_id;	/* 0x100 */
	uint32 PAD[15];	/* 0x104..0x13c reserved */
	uint32 idm_reset_ctrl;	/* 0x140 */
} amni_regs_t;
339
340 #define NCI_SPINWAIT_TIMEOUT (300u)
341
342 /* DMP/io control and DMP/io status */
typedef struct dmp_regs {
	uint32 dmpctrl;	/* DMP/io control word */
	uint32 dmpstatus;	/* DMP/io status word */
} dmp_regs_t;
347
348 #ifdef _RTE_
349 static nci_info_t *knci_info = NULL;
350 #endif /* _RTE_ */
351
352 static void nci_save_iface1_reg(interface_desc_t *desc, uint32 iface_desc_1);
353 static uint32* nci_save_slaveport_addr(nci_info_t *nci,
354 interface_desc_t *desc, uint32 *erom2ptr);
355 static int nci_get_coreunit(nci_cores_t *cores, uint32 numcores, uint cid,
356 uint32 iface_desc_1);
357 static nci_cores_t* nci_initial_parse(nci_info_t *nci, uint32 *erom2ptr, uint32 *core_idx);
358 static void _nci_setcoreidx_pcie_bus(si_t *sih, volatile void **regs, uint32 curmap,
359 uint32 curwrap);
360 static volatile void *_nci_setcoreidx(si_t *sih, uint coreidx);
361 static uint32 _nci_get_curwrap(nci_info_t *nci, uint coreidx, uint wrapper_idx);
362 static uint32 nci_get_curwrap(nci_info_t *nci, uint coreidx);
363 static uint32 _nci_get_curmap(nci_info_t *nci, uint coreidx, uint slave_port_idx, uint base_idx);
364 static uint32 nci_get_curmap(nci_info_t *nci, uint coreidx);
365 static void _nci_core_reset(const si_t *sih, uint32 bits, uint32 resetbits);
366 #if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
367 static void nci_reset_APB(const si_info_t *sii, aidmp_t *ai, int *ret,
368 uint32 errlog_status, uint32 errlog_id);
369 static void nci_reset_axi_to(const si_info_t *sii, aidmp_t *ai);
370 #endif /* (AXI_TIMEOUTS) || (AXI_TIMEOUTS_NIC) */
371 static uint32 nci_find_numcores(si_t *sih);
372 #ifdef BOOKER_NIC400_INF
373 static int32 nci_find_first_wrapper_idx(nci_info_t *nci, uint32 coreidx);
374 #endif /* BOOKER_NIC400_INF */
375
376 /*
377 * Description : This function will search for a CORE with matching 'core_id' and mismatching
378 * 'wordoffset', if found then increments 'coreunit' by 1.
379 */
380 /* TODO: Need to understand this. */
381 static int
BCMATTACHFN(nci_get_coreunit)382 BCMATTACHFN(nci_get_coreunit)(nci_cores_t *cores, uint32 numcores,
383 uint core_id, uint32 iface_desc_1)
384 {
385 uint32 core_idx;
386 uint32 coreunit = NCI_DEFAULT_CORE_UNIT;
387
388 for (core_idx = 0u; core_idx < numcores; core_idx++) {
389 if ((cores[core_idx].coreid == core_id) &&
390 (GET_COREOFFSET(cores[core_idx].desc->iface_desc_1) !=
391 GET_COREOFFSET(iface_desc_1))) {
392 coreunit = cores[core_idx].coreunit + 1;
393 }
394 }
395
396 return coreunit;
397 }
398
399 /*
400 * OOBR Region
401 +-------------------------------+
402 + +
403 + OOBR with EROM Data +
404 + +
405 +-------------------------------+
406 + +
407 + EROM1 +
408 + +
409 +-------------------------------+ --> ChipCommon.EROMBASE
410 + +
411 + EROM2 +
412 + +
413 +-------------------------------+
414 */
415
416 /**
417 * Function : nci_init
418 * Description : Malloc's memory related to 'nci_info_t' and its internal elements.
419 *
 * @parameter[in]
 * @regs : This is a ChipCommon Register
 * @bustype : Bus Connect Type
 *
 * Return : On Success 'nci_info_t' data structure is returned as void,
 * where all EROM parsed Cores are saved,
 * using this all EROM Cores are Freed.
 * On Failure 'NULL' is returned by printing ERROR messages
428 */
429 void*
BCMATTACHFN(nci_init)430 BCMATTACHFN(nci_init)(si_t *sih, chipcregs_t *cc, uint bustype)
431 {
432 si_info_t *sii = SI_INFO(sih);
433 nci_cores_t *cores;
434 nci_info_t *nci = NULL;
435 uint8 err_at = 0u;
436
437 #ifdef _RTE_
438 if (knci_info) {
439 knci_info->refcnt++;
440 nci = knci_info;
441
442 goto end;
443 }
444 #endif /* _RTE_ */
445
446 /* It is used only when NCI_ERROR is used */
447 BCM_REFERENCE(err_at);
448
449 if ((nci = MALLOCZ(sii->osh, sizeof(*nci))) == NULL) {
450 err_at = 1u;
451 goto end;
452 }
453 sii->nci_info = nci;
454
455 nci->osh = sii->osh;
456 nci->refcnt++;
457
458 nci->cc_erom2base = R_REG(nci->osh, &cc->eromptr);
459 nci->bustype = bustype;
460 switch (nci->bustype) {
461 case SI_BUS:
462 nci->erom2base = (uint32*)REG_MAP(nci->cc_erom2base, 0u);
463 nci->oobr_base = (uint32*)REG_MAP(GET_OOBR_BASE(nci->cc_erom2base), 0u);
464 nci->erom1base = (uint32*)REG_MAP(GET_EROM1_BASE(nci->cc_erom2base), 0u);
465
466 break;
467
468 case PCI_BUS:
469 /* Set wrappers address */
470 sii->curwrap = (void *)((uintptr)cc + SI_CORE_SIZE);
471 /* Set access window to Erom Base(For NCI, EROM starts with OOBR) */
472 OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
473 GET_EROM1_BASE(nci->cc_erom2base));
474 nci->erom1base = (uint32*)((uintptr)cc);
475 nci->erom2base = (uint32*)((uintptr)cc + NCI_EROM1_LEN(nci->cc_erom2base));
476
477 break;
478
479 default:
480 err_at = 2u;
481 ASSERT(0u);
482 goto end;
483 }
484
485 nci->max_cores = nci_find_numcores(sih);
486 if (!nci->max_cores) {
487 err_at = 3u;
488 goto end;
489 }
490
491 if ((cores = MALLOCZ(nci->osh, sizeof(*cores) * nci->max_cores)) == NULL) {
492 err_at = 4u;
493 goto end;
494 }
495 nci->cores = cores;
496
497 #ifdef _RTE_
498 knci_info = nci;
499 #endif /* _RTE_ */
500
501 end:
502 if (err_at) {
503 NCI_ERROR(("nci_init: Failed err_at=%#x\n", err_at));
504 nci_uninit(nci);
505 nci = NULL;
506 }
507
508 return nci;
509 }
510
511 /**
512 * Function : nci_uninit
513 * Description : Free's memory related to 'nci_info_t' and its internal malloc'd elements.
514 *
515 * @paramter[in]
516 * @nci : This is 'nci_info_t' data structure, where all EROM parsed Cores are saved, using this
517 * all EROM Cores are Freed.
518 *
519 * Return : void
520 */
521 void
BCMATTACHFN(nci_uninit)522 BCMATTACHFN(nci_uninit)(void *ctx)
523 {
524 nci_info_t *nci = (nci_info_t *)ctx;
525 uint8 core_idx, desc_idx;
526 interface_desc_t *desc;
527 nci_cores_t *cores;
528 slave_port_t *sp;
529
530 if (nci == NULL) {
531 return;
532 }
533
534 nci->refcnt--;
535
536 #ifdef _RTE_
537 if (nci->refcnt != 0) {
538 return;
539 }
540 #endif /* _RTE_ */
541
542 cores = nci->cores;
543 if (cores == NULL) {
544 goto end;
545 }
546
547 for (core_idx = 0u; core_idx < nci->num_cores; core_idx++) {
548 desc = cores[core_idx].desc;
549 if (desc == NULL) {
550 break;
551 }
552
553 for (desc_idx = 0u; desc_idx < cores[core_idx].iface_cnt; desc_idx++) {
554 sp = desc[desc_idx].sp;
555 if (sp) {
556 MFREE(nci->osh, sp, (sizeof(*sp) * desc[desc_idx].num_addr_reg));
557 }
558 }
559 MFREE(nci->osh, desc, (sizeof(*desc) * cores[core_idx].iface_cnt));
560 }
561 MFREE(nci->osh, cores, sizeof(*cores) * nci->max_cores);
562
563 end:
564
565 #ifdef _RTE_
566 knci_info = NULL;
567 #endif /* _RTE_ */
568
569 MFREE(nci->osh, nci, sizeof(*nci));
570 }
571
572 /**
573 * Function : nci_save_iface1_reg
574 * Description : Interface1 Descriptor is obtained from the Reg and saved in
575 * Internal data structures 'nci->cores'.
576 *
 * @parameter[in]
 * @desc : Descriptor of Core which needs to be updated with the obtained Interface1 Descriptor.
 * @iface_desc_1 : Obtained Interface1 Descriptor.
580 *
581 * Return : void
582 */
583 static void
BCMATTACHFN(nci_save_iface1_reg)584 BCMATTACHFN(nci_save_iface1_reg)(interface_desc_t *desc, uint32 iface_desc_1)
585 {
586 BCM_REFERENCE(BACKPLANE_ID_NAME);
587
588 desc->coretype = GET_CORETYPE(iface_desc_1);
589 desc->master = IS_MASTER(iface_desc_1);
590
591 desc->iface_desc_1 = iface_desc_1;
592 desc->num_addr_reg = GET_NUM_ADDR_REG(iface_desc_1);
593 if (desc->master) {
594 if (desc->num_addr_reg) {
595 NCI_ERROR(("nci_save_iface1_reg: Master NODEPTR Addresses is not zero "
596 "i.e. %d\n", GET_NUM_ADDR_REG(iface_desc_1)));
597 ASSERT(0u);
598 }
599 } else {
600 /* SLAVE 'NumAddressRegion' one less than actual slave ports, so increment by 1 */
601 desc->num_addr_reg++;
602 }
603
604 NCI_INFO(("\tnci_save_iface1_reg: %s InterfaceDesc:%#x WordOffset=%#x "
605 "NoAddrReg=%#x %s_Offset=%#x BackplaneID=%s\n",
606 desc->master?"Master":"Slave", desc->iface_desc_1,
607 GET_WORDOFFSET(desc->iface_desc_1),
608 desc->num_addr_reg, desc->coretype?"EROM1":"OOBR",
609 GET_COREOFFSET(desc->iface_desc_1),
610 BACKPLANE_ID_NAME[ID_BPID(desc->iface_desc_1)]));
611 }
612
613 /**
614 * Function : nci_save_slaveport_addr
615 * Description : All Slave Port Addr of Interface Descriptor are saved.
616 *
617 * @paramter[in]
618 * @nci : This is 'nci_info_t' data structure, where all EROM parsed Cores are saved
619 * @desc : Current Interface Descriptor.
620 * @erom2ptr : Pointer to Address Descriptor0.
621 *
622 * Return : On Success, this function returns Erom2 Ptr to Next Interface Descriptor,
623 * On Failure, NULL is returned.
624 */
static uint32*
BCMATTACHFN(nci_save_slaveport_addr)(nci_info_t *nci,
	interface_desc_t *desc, uint32 *erom2ptr)
{
	slave_port_t *sp;
	uint32 adesc;
	uint32 sz;
	uint32 addr_idx;

	/* Allocate 'NumAddressRegion' of Slave Port */
	if ((desc->sp = (slave_port_t *)MALLOCZ(
		nci->osh, (sizeof(*sp) * desc->num_addr_reg))) == NULL) {
		NCI_ERROR(("\tnci_save_slaveport_addr: Memory Allocation failed for Slave Port\n"));
		return NULL;
	}

	sp = desc->sp;
	/* Slave Port Addrs Desc: each region starts with one descriptor word; the
	 * descriptor's flag bits determine how many further words follow, so the
	 * read order below must not be changed.
	 */
	for (addr_idx = 0u; addr_idx < desc->num_addr_reg; addr_idx++) {
		adesc = R_REG(nci->osh, erom2ptr);
		NCI_INC_ADDR(erom2ptr, NCI_WORD_SIZE);
		sp[addr_idx].adesc = adesc;

		sp[addr_idx].addrl = adesc & SLAVEPORT_BASE_ADDR_MASK;
		if (adesc & SLAVEPORT_64BIT_ADDR_MASK) {
			/* 64-bit region: three extra words (base-high, bound-low, bound-high) */
			sp[addr_idx].addrh = R_REG(nci->osh, erom2ptr);
			NCI_INC_ADDR(erom2ptr, NCI_WORD_SIZE);
			sp[addr_idx].extaddrl = R_REG(nci->osh, erom2ptr);
			NCI_INC_ADDR(erom2ptr, NCI_WORD_SIZE);
			sp[addr_idx].extaddrh = R_REG(nci->osh, erom2ptr);
			NCI_INC_ADDR(erom2ptr, NCI_WORD_SIZE);
			NCI_INFO(("\tnci_save_slaveport_addr: SlavePortAddr[%#x]:0x%08x al=0x%08x "
				"ah=0x%08x extal=0x%08x extah=0x%08x\n", addr_idx, adesc,
				sp[addr_idx].addrl, sp[addr_idx].addrh, sp[addr_idx].extaddrl,
				sp[addr_idx].extaddrh));
		}
		else if (adesc & SLAVEPORT_BOUND_ADDR_MASK) {
			/* Non-power-of-2 region: one extra word carries the bound address */
			sp[addr_idx].addrh = R_REG(nci->osh, erom2ptr);
			NCI_INC_ADDR(erom2ptr, NCI_WORD_SIZE);
			NCI_INFO(("\tnci_save_slaveport_addr: SlavePortAddr[%#x]:0x%08x al=0x%08x "
				"ah=0x%08x\n", addr_idx, adesc, sp[addr_idx].addrl,
				sp[addr_idx].addrh));
		} else {
			/* Power-of-2 region: the size is encoded in the descriptor itself */
			sz = adesc & SLAVEPORT_ADDR_SIZE_MASK;
			sp[addr_idx].addrh = sp[addr_idx].addrl + ADDR_SIZE(sz);
			NCI_INFO(("\tnci_save_slaveport_addr: SlavePortAddr[%#x]:0x%08x al=0x%08x "
				"ah=0x%08x sz=0x%08x\n", addr_idx, adesc, sp[addr_idx].addrl,
				sp[addr_idx].addrh, sz));
		}
	}

	/* erom2ptr now points at the word after the last address descriptor */
	return erom2ptr;
}
678
679 /**
680 * Function : nci_initial_parse
681 * Description : This function does
682 * 1. Obtains OOBR/EROM1 pointer based on CoreType
683 * 2. Analysis right CoreUnit for this 'core'
684 * 3. Saves CoreInfo & Interface Config in Coresponding 'core'
685 *
686 * @paramter[in]
687 * @nci : This is 'nci_info_t' data structure, where all EROM parsed Cores are saved.
688 * @erom2ptr : Pointer to Interface Descriptor0.
689 * @core_idx : New core index needs to be populated in this pointer.
690 *
691 * Return : On Success, this function returns 'core' where CoreInfo & Interface Config are saved.
692 */
static nci_cores_t*
BCMATTACHFN(nci_initial_parse)(nci_info_t *nci, uint32 *erom2ptr, uint32 *core_idx)
{
	uint32 iface_desc_1;
	nci_cores_t *core;
	uint32 dmp_regs_off = 0u;
	uint32 iface_cfg = 0u;
	uint32 core_info;
	uint32 *ptr;
	uint coreid;

	iface_desc_1 = R_REG(nci->osh, erom2ptr);

	/* Get EROM1/OOBR Pointer based on CoreType */
	if (!GET_CORETYPE(iface_desc_1)) {
		/* OOBR core: on PCI the BAR0 window must first be moved to the OOBR
		 * base so the core info words are reachable (restored again below).
		 */
		if (nci->bustype == PCI_BUS) {
			OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
				GET_OOBR_BASE(nci->cc_erom2base));
			nci->oobr_base = (uint32*)((uintptr)nci->erom1base);
		}

		ptr = NCI_ADD_ADDR(nci->oobr_base, GET_COREOFFSET(iface_desc_1));
	} else {
		ptr = NCI_ADD_ADDR(nci->erom1base, GET_COREOFFSET(iface_desc_1));
	}
	/* DMP control/status live DMP_DMPCTRL_REG_OFFSET bytes past the core info */
	dmp_regs_off = GET_COREOFFSET(iface_desc_1) + DMP_DMPCTRL_REG_OFFSET;

	/* Two consecutive words: CoreInfo then InterfaceConfig */
	core_info = R_REG(nci->osh, ptr);
	NCI_INC_ADDR(ptr, NCI_WORD_SIZE);
	iface_cfg = R_REG(nci->osh, ptr);

	*core_idx = nci->num_cores;
	core = &nci->cores[*core_idx];

	/* NOTE(review): core ids below 0xFF are remapped into the 0x8xx range --
	 * presumably to match the classic SOCI core-id namespace; confirm.
	 */
	if (CORE_ID(core_info) < 0xFFu) {
		coreid = CORE_ID(core_info) | 0x800u;
	} else {
		coreid = CORE_ID(core_info);
	}

	/* Get coreunit from previous cores i.e. num_cores */
	core->coreunit = nci_get_coreunit(nci->cores, nci->num_cores,
		coreid, iface_desc_1);

	core->coreid = coreid;

	/* Increment the num_cores once proper coreunit is known */
	nci->num_cores++;

	NCI_INFO(("\n\nnci_initial_parse: core_idx:%d %s=%p \n",
		*core_idx, GET_CORETYPE(iface_desc_1)?"EROM1":"OOBR", ptr));

	/* Core Info Register */
	core->coreinfo = core_info;

	/* Save DMP register base address. */
	core->dmp_regs_off = dmp_regs_off;

	NCI_INFO(("\tnci_initial_parse: COREINFO:%#x CId:%#x CUnit=%#x CRev=%#x CMfg=%#x\n",
		core->coreinfo, core->coreid, core->coreunit, CORE_REV(core->coreinfo),
		CORE_MFG(core->coreinfo)));

	/* Interface Config Register */
	core->iface_cfg = iface_cfg;
	core->iface_cnt = GET_INFACECNT(iface_cfg);

	NCI_INFO(("\tnci_initial_parse: INTERFACE_CFG:%#x IfaceCnt=%#x IfaceOffset=%#x \n",
		iface_cfg, core->iface_cnt, iface_cfg & IC_IFACEOFFSET_MASK));

	/* For PCI_BUS case set back BAR0 Window to EROM1 Base */
	if (nci->bustype == PCI_BUS) {
		OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
			GET_EROM1_BASE(nci->cc_erom2base));
	}

	return core;
}
770
771 static uint32
BCMATTACHFN(nci_find_numcores)772 BCMATTACHFN(nci_find_numcores)(si_t *sih)
773 {
774 const si_info_t *sii = SI_INFO(sih);
775 nci_info_t *nci = sii->nci_info;
776 volatile hndoobr_reg_t *oobr_reg = NULL;
777 uint32 orig_bar0_win1 = 0u;
778 uint32 num_oobr_cores = 0u;
779 uint32 num_nonoobr_cores = 0u;
780
781 /* No of Non-OOBR Cores */
782 num_nonoobr_cores = NCI_NONOOBR_CORES(nci->cc_erom2base);
783 if (num_nonoobr_cores <= 0u) {
784 NCI_ERROR(("nci_find_numcores: Invalid Number of non-OOBR cores %d\n",
785 num_nonoobr_cores));
786 goto fail;
787 }
788
789 /* No of OOBR Cores */
790 switch (BUSTYPE(sih->bustype)) {
791 case SI_BUS:
792 oobr_reg = (volatile hndoobr_reg_t*)REG_MAP(GET_OOBR_BASE(nci->cc_erom2base),
793 SI_CORE_SIZE);
794 break;
795
796 case PCI_BUS:
797 /* Save Original Bar0 Win1 */
798 orig_bar0_win1 = OSL_PCI_READ_CONFIG(nci->osh, PCI_BAR0_WIN,
799 PCI_ACCESS_SIZE);
800
801 OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
802 GET_OOBR_BASE(nci->cc_erom2base));
803 oobr_reg = (volatile hndoobr_reg_t*)sii->curmap;
804 break;
805
806 default:
807 NCI_ERROR(("nci_find_numcores: Invalid bustype %d\n", BUSTYPE(sih->bustype)));
808 ASSERT(0);
809 goto fail;
810 }
811
812 num_oobr_cores = R_REG(nci->osh, &oobr_reg->capability) & OOBR_CAP_CORECNT_MASK;
813 if (num_oobr_cores <= 0u) {
814 NCI_ERROR(("nci_find_numcores: Invalid Number of OOBR cores %d\n", num_oobr_cores));
815 goto fail;
816 }
817
818 /* Point back to original base */
819 if (BUSTYPE(sih->bustype) == PCI_BUS) {
820 OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE, orig_bar0_win1);
821 }
822
823 NCI_PRINT(("nci_find_numcores: Total Cores found %d\n",
824 (num_oobr_cores + num_nonoobr_cores)));
825 /* Total No of Cores */
826 return (num_oobr_cores + num_nonoobr_cores);
827
828 fail:
829 return 0u;
830 }
831
832 /**
833 * Function : nci_scan
 * Description : Function parses EROM in BOOKER NCI Architecture and saves all information about
835 * Cores in 'nci_info_t' data structure.
836 *
837 * @paramter[in]
838 * @nci : This is 'nci_info_t' data structure, where all EROM parsed Cores are saved.
839 *
840 * Return : On Success No of parsed Cores in EROM is returned,
841 * On Failure '0' is returned by printing ERROR messages
842 * in Console(If NCI_LOG_LEVEL is enabled).
843 */
uint32
BCMATTACHFN(nci_scan)(si_t *sih)
{
	si_info_t *sii = SI_INFO(sih);
	nci_info_t *nci = (nci_info_t *)sii->nci_info;
	axi_wrapper_t * axi_wrapper = sii->axi_wrapper;
	uint32 *cur_iface_desc_1_ptr;
	nci_cores_t *core;
	interface_desc_t *desc;
	uint32 wordoffset = 0u;
	uint32 iface_desc_0;
	uint32 iface_desc_1;
	uint32 *erom2ptr;
	uint8 iface_idx;
	uint32 core_idx;
	int err = 0;

	/* If scan was finished already */
	if (nci->scan_done) {
		goto end;
	}

	erom2ptr = nci->erom2base;
	sii->axi_num_wrappers = 0;

	/* Walk EROM2; each outer iteration consumes one core's interface records */
	while (TRUE) {
		iface_desc_0 = R_REG(nci->osh, erom2ptr);
		if (iface_desc_0 == ID_ENDMARKER) {
			NCI_INFO(("\nnci_scan: Reached end of EROM2 with total cores=%d \n",
				nci->num_cores));
			break;
		}

		/* Save current Iface1 Addr for comparision */
		cur_iface_desc_1_ptr = GET_NEXT_EROM_ADDR(erom2ptr);

		/* Get CoreInfo, InterfaceCfg, CoreIdx */
		core = nci_initial_parse(nci, cur_iface_desc_1_ptr, &core_idx);

		core->desc = (interface_desc_t *)MALLOCZ(
			nci->osh, (sizeof(*(core->desc)) * core->iface_cnt));
		if (core->desc == NULL) {
			NCI_ERROR(("nci_scan: Mem Alloc failed for Iface and Addr "
				"Descriptor\n"));
			err = NCI_NOMEM;
			break;
		}

		for (iface_idx = 0u; iface_idx < core->iface_cnt; iface_idx++) {
			desc = &core->desc[iface_idx];

			/* Each interface contributes two consecutive descriptor words */
			iface_desc_0 = R_REG(nci->osh, erom2ptr);
			NCI_INC_ADDR(erom2ptr, NCI_WORD_SIZE);
			iface_desc_1 = R_REG(nci->osh, erom2ptr);
			NCI_INC_ADDR(erom2ptr, NCI_WORD_SIZE);

			/* Interface Descriptor Register */
			nci_save_iface1_reg(desc, iface_desc_1);
			if (desc->master && desc->num_addr_reg) {
				err = NCI_MASTER_INVALID_ADDR;
				goto end;
			}

			/* A zero word offset on the last interface terminates the scan */
			wordoffset = GET_WORDOFFSET(iface_desc_1);

			/* NodePointer Register */
			desc->iface_desc_0 = iface_desc_0;
			desc->node_ptr = GET_NODEPTR(iface_desc_0);
			desc->node_type = GET_NODETYPE(iface_desc_0);

			/* Mirror this interface into the legacy AXI wrapper table */
			if (axi_wrapper && (sii->axi_num_wrappers < SI_MAX_AXI_WRAPPERS)) {
				axi_wrapper[sii->axi_num_wrappers].mfg = CORE_MFG(core->coreinfo);
				axi_wrapper[sii->axi_num_wrappers].cid = CORE_ID(core->coreinfo);
				axi_wrapper[sii->axi_num_wrappers].rev = CORE_REV(core->coreinfo);
				axi_wrapper[sii->axi_num_wrappers].wrapper_type = desc->master;
				axi_wrapper[sii->axi_num_wrappers].wrapper_addr = desc->node_ptr;
				sii->axi_num_wrappers++;
			}

			NCI_INFO(("nci_scan: %s NodePointer:%#x Type=%s NODEPTR=%#x \n",
				desc->master?"Master":"Slave", desc->iface_desc_0,
				desc->node_type?"NIC-400":"BOOKER", desc->node_ptr));

			/* Slave Port Addresses */
			if (!desc->master) {
				erom2ptr = nci_save_slaveport_addr(nci, desc, erom2ptr);
				if (erom2ptr == NULL) {
					NCI_ERROR(("nci_scan: Invalid EROM2PTR\n"));
					err = NCI_INVALID_EROM2PTR;
					goto end;
				}
			}

			/* Current loop ends with next iface_desc_0 */
		}

		if (wordoffset == 0u) {
			NCI_INFO(("\nnci_scan: EROM PARSING found END 'wordoffset=%#x' "
				"with total cores=%d \n", wordoffset, nci->num_cores));
			break;
		}
	}
	nci->scan_done = TRUE;

end:
	if (err) {
		NCI_ERROR(("nci_scan: Failed with Code %d\n", err));
		/* NOTE(review): zeroing num_cores here orphans the per-core 'desc'
		 * allocations (nci_uninit frees only num_cores entries) -- looks like
		 * a leak on the error path; confirm intended.
		 */
		nci->num_cores = 0;
		ASSERT(0u);
	}

	return nci->num_cores;
}
957
958 /**
959 * Function : nci_dump_erom
960 * Description : Function dumps EROM from inforamtion cores in 'nci_info_t' data structure.
961 *
962 * @paramter[in]
963 * @nci : This is 'nci_info_t' data structure, where all EROM parsed Cores are saved.
964 *
965 * Return : void
966 */
void
BCMATTACHFN(nci_dump_erom)(void *ctx)
{
	nci_info_t *nci = (nci_info_t *)ctx;
	nci_cores_t *core;
	interface_desc_t *desc;
	slave_port_t *sp;
	uint32 core_idx, addr_idx, iface_idx;
	uint32 core_info;

	/* core_info is only consumed inside NCI_INFO(); the reference keeps the
	 * compiler quiet when the logging macros are compiled out.
	 */
	BCM_REFERENCE(core_info);

	NCI_INFO(("\nnci_dump_erom: -- EROM Dump --\n"));
	for (core_idx = 0u; core_idx < nci->num_cores; core_idx++) {
		core = &nci->cores[core_idx];

		/* Core Info Register */
		core_info = core->coreinfo;
		NCI_INFO(("\nnci_dump_erom: core_idx=%d COREINFO:%#x CId:%#x CUnit:%#x CRev=%#x "
			"CMfg=%#x\n", core_idx, core_info, CORE_ID(core_info), core->coreunit,
			CORE_REV(core_info), CORE_MFG(core_info)));

		/* Interface Config Register */
		NCI_INFO(("nci_dump_erom: IfaceCfg=%#x IfaceCnt=%#x \n",
			core->iface_cfg, core->iface_cnt));

		/* One descriptor per interface was saved during the EROM scan */
		for (iface_idx = 0u; iface_idx < core->iface_cnt; iface_idx++) {
			desc = &core->desc[iface_idx];
			/* NodePointer Register */
			NCI_INFO(("nci_dump_erom: %s iface_desc_0 Master=%#x MASTER_WRAP=%#x "
				"Type=%s \n", desc->master?"Master":"Slave", desc->iface_desc_0,
				desc->node_ptr,
				(desc->node_type)?"NIC-400":"BOOKER"));

			/* Interface Descriptor Register */
			NCI_INFO(("nci_dump_erom: %s InterfaceDesc:%#x WOffset=%#x NoAddrReg=%#x "
				"%s_Offset=%#x\n", desc->master?"Master":"Slave",
				desc->iface_desc_1, GET_WORDOFFSET(desc->iface_desc_1),
				desc->num_addr_reg,
				desc->coretype?"EROM1":"OOBR", GET_COREOFFSET(desc->iface_desc_1)));

			/* Slave Port Addresses (only slave interfaces carry them) */
			sp = desc->sp;
			if (!sp) {
				continue;
			}
			for (addr_idx = 0u; addr_idx < desc->num_addr_reg; addr_idx++) {
				/* extaddrl non-zero means the 64-bit extended
				 * address pair was populated for this region
				 */
				if (sp[addr_idx].extaddrl) {
					NCI_INFO(("nci_dump_erom: SlavePortAddr[%#x]: AddrDesc=%#x"
						" al=%#x ah=%#x extal=%#x extah=%#x\n", addr_idx,
						sp[addr_idx].adesc, sp[addr_idx].addrl,
						sp[addr_idx].addrh, sp[addr_idx].extaddrl,
						sp[addr_idx].extaddrh));
				} else {
					NCI_INFO(("nci_dump_erom: SlavePortAddr[%#x]: AddrDesc=%#x"
						" al=%#x ah=%#x\n", addr_idx, sp[addr_idx].adesc,
						sp[addr_idx].addrl, sp[addr_idx].addrh));
				}
			}
		}
	}

	return;
}
1031
1032 /*
1033 * Switch to 'coreidx', issue a single arbitrary 32bit register mask & set operation,
1034 * switch back to the original core, and return the new value.
1035 */
uint
BCMPOSTTRAPFN(nci_corereg)(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	volatile uint32 *r = NULL;
	uint w;
	bcm_int_bitmask_t intr_val;
	bool fast = FALSE;	/* TRUE when 'r' is reachable without switching cores */
	si_info_t *sii = SI_INFO(sih);
	nci_info_t *nci = sii->nci_info;
	nci_cores_t *cores_info = &nci->cores[coreidx];

	NCI_TRACE(("nci_corereg coreidx %u regoff %u mask %u val %u\n",
		coreidx, regoff, mask, val));
	ASSERT(GOODIDX(coreidx, nci->num_cores));
	ASSERT(regoff < SI_CORE_SIZE);
	/* 'val' bits outside of 'mask' would be silently dropped by the RMW below */
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES) {
		return 0;
	}

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		uint32 curmap = nci_get_curmap(nci, coreidx);
		BCM_REFERENCE(curmap);

		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs) {
			/* NOTE(review): curmap may be NCI_BAD_REG if no slave port
			 * was found for this core - TODO confirm unreachable here.
			 */
			cores_info->regs = REG_MAP(curmap, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs + regoff);
	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((cores_info->coreid == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
				PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii)) {
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					PCI_16KB0_PCIREGS_OFFSET + regoff);
			} else {
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					((regoff >= SBCONFIGOFF) ?
					PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + regoff);
			}
		}
	}

	if (!fast) {
		/* Slow path: interrupts are blocked across the core switch so no
		 * ISR can observe the temporarily re-pointed register window.
		 */
		INTR_OFF(sii, &intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (volatile uint32*)((volatile uchar*)nci_setcoreidx(&sii->pub, coreidx) +
			regoff);
	}
	ASSERT(r != NULL);

	/* mask and set: skipped entirely when both are zero (pure read) */
	if (mask || val) {
		w = (R_REG(sii->osh, r) & ~mask) | val;
		W_REG(sii->osh, r, w);
	}

	/* readback: the returned value is always re-read from hardware */
	w = R_REG(sii->osh, r);

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx) {
			nci_setcoreidx(&sii->pub, origidx);
		}
		INTR_RESTORE(sii, &intr_val);
	}

	return (w);
}
1126
/* Same as nci_corereg() but without the trailing hardware readback: returns the
 * value that was written, or 0 when mask and val are both zero (no access made).
 * NOTE(review): despite the name, a non-zero mask still triggers a read-modify-
 * write (one R_REG before the W_REG).
 */
uint
nci_corereg_writeonly(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
	uint origidx = 0;
	volatile uint32 *r = NULL;
	uint w = 0;
	bcm_int_bitmask_t intr_val;
	bool fast = FALSE;	/* TRUE when 'r' is reachable without switching cores */
	si_info_t *sii = SI_INFO(sih);
	nci_info_t *nci = sii->nci_info;
	nci_cores_t *cores_info = &nci->cores[coreidx];

	NCI_TRACE(("nci_corereg_writeonly() coreidx %u regoff %u mask %u val %u\n",
		coreidx, regoff, mask, val));

	ASSERT(GOODIDX(coreidx, nci->num_cores));
	ASSERT(regoff < SI_CORE_SIZE);
	/* 'val' bits outside of 'mask' would be silently dropped by the RMW below */
	ASSERT((val & ~mask) == 0);

	if (coreidx >= SI_MAXCORES) {
		return 0;
	}

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		/* If internal bus, we can always get at everything */
		uint32 curmap = nci_get_curmap(nci, coreidx);
		BCM_REFERENCE(curmap);
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs) {
			cores_info->regs = REG_MAP(curmap, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs + regoff);
	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((cores_info->coreid == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
				PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii)) {
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					PCI_16KB0_PCIREGS_OFFSET + regoff);
			} else {
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					((regoff >= SBCONFIGOFF) ?
					PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + regoff);
			}
		}
	}

	if (!fast) {
		/* Slow path: block interrupts across the temporary core switch */
		INTR_OFF(sii, &intr_val);

		/* save current core index */
		origidx = si_coreidx(&sii->pub);

		/* switch core */
		r = (volatile uint32*) ((volatile uchar*) nci_setcoreidx(&sii->pub, coreidx) +
			regoff);
	}
	ASSERT(r != NULL);

	/* mask and set (no final readback - see function header) */
	if (mask || val) {
		w = (R_REG(sii->osh, r) & ~mask) | val;
		W_REG(sii->osh, r, w);
	}

	if (!fast) {
		/* restore core index */
		if (origidx != coreidx) {
			nci_setcoreidx(&sii->pub, origidx);
		}

		INTR_RESTORE(sii, &intr_val);
	}

	return (w);
}
1215
1216 /*
1217 * If there is no need for fiddling with interrupts or core switches (typically silicon
1218 * back plane registers, pci registers and chipcommon registers), this function
1219 * returns the register offset on this core to a mapped address. This address can
1220 * be used for W_REG/R_REG directly.
1221 *
1222 * For accessing registers that would need a core switch, this function will return
1223 * NULL.
1224 */
volatile uint32 *
nci_corereg_addr(si_t *sih, uint coreidx, uint regoff)
{
	volatile uint32 *r = NULL;
	bool fast = FALSE;	/* TRUE when the address is reachable without a core switch */
	si_info_t *sii = SI_INFO(sih);
	nci_info_t *nci = sii->nci_info;
	nci_cores_t *cores_info = &nci->cores[coreidx];

	NCI_TRACE(("nci_corereg_addr() coreidx %u regoff %u\n", coreidx, regoff));

	ASSERT(GOODIDX(coreidx, nci->num_cores));
	ASSERT(regoff < SI_CORE_SIZE);

	if (coreidx >= SI_MAXCORES) {
		return 0;
	}

	if (BUSTYPE(sih->bustype) == SI_BUS) {
		uint32 curmap = nci_get_curmap(nci, coreidx);
		BCM_REFERENCE(curmap);

		/* If internal bus, we can always get at everything */
		fast = TRUE;
		/* map if does not exist */
		if (!cores_info->regs) {
			cores_info->regs = REG_MAP(curmap, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs));
		}
		r = (volatile uint32 *)((volatile uchar *)cores_info->regs + regoff);

	} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
		/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */

		if ((cores_info->coreid == CC_CORE_ID) && SI_FAST(sii)) {
			/* Chipc registers are mapped at 12KB */

			fast = TRUE;
			r = (volatile uint32 *)((volatile char *)sii->curmap +
				PCI_16KB0_CCREGS_OFFSET + regoff);
		} else if (sii->pub.buscoreidx == coreidx) {
			/* pci registers are at either in the last 2KB of an 8KB window
			 * or, in pcie and pci rev 13 at 8KB
			 */
			fast = TRUE;
			if (SI_FAST(sii)) {
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					PCI_16KB0_PCIREGS_OFFSET + regoff);
			} else {
				r = (volatile uint32 *)((volatile char *)sii->curmap +
					((regoff >= SBCONFIGOFF) ?
					PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + regoff);
			}
		}
	}

	if (!fast) {
		/* NOTE(review): unlike the header comment's "returns NULL" contract,
		 * the non-fast path asserts the core is already selected and returns
		 * an address inside the current window - TODO confirm intended.
		 */
		ASSERT(sii->curidx == coreidx);
		r = (volatile uint32*) ((volatile uchar*)sii->curmap + regoff);
	}

	return (r);
}
1288
1289 uint
BCMPOSTTRAPFN(nci_findcoreidx)1290 BCMPOSTTRAPFN(nci_findcoreidx)(const si_t *sih, uint coreid, uint coreunit)
1291 {
1292 si_info_t *sii = SI_INFO(sih);
1293 nci_info_t *nci = sii->nci_info;
1294 uint core_idx;
1295
1296 NCI_TRACE(("nci_findcoreidx() coreid %u coreunit %u\n", coreid, coreunit));
1297
1298 for (core_idx = 0; core_idx < nci->num_cores; core_idx++) {
1299 if ((nci->cores[core_idx].coreid == coreid) &&
1300 (nci->cores[core_idx].coreunit == coreunit)) {
1301 return core_idx;
1302 }
1303 }
1304 return BADIDX;
1305 }
1306
1307 static uint32
_nci_get_slave_addr_size(nci_info_t * nci,uint coreidx,uint32 slave_port_idx,uint base_idx)1308 _nci_get_slave_addr_size(nci_info_t *nci, uint coreidx, uint32 slave_port_idx, uint base_idx)
1309 {
1310 uint32 size;
1311 uint32 add_desc;
1312
1313 NCI_TRACE(("_nci_get_slave_addr_size() coreidx %u slave_port_idx %u base_idx %u\n",
1314 coreidx, slave_port_idx, base_idx));
1315
1316 add_desc = nci->cores[coreidx].desc[slave_port_idx].sp[base_idx].adesc;
1317
1318 size = add_desc & SLAVEPORT_ADDR_SIZE_MASK;
1319 return ADDR_SIZE(size);
1320 }
1321
1322 static uint32
BCMPOSTTRAPFN(_nci_get_curmap)1323 BCMPOSTTRAPFN(_nci_get_curmap)(nci_info_t *nci, uint coreidx, uint slave_port_idx, uint base_idx)
1324 {
1325 /* TODO: Is handling of 64 bit addressing required */
1326 NCI_TRACE(("_nci_get_curmap coreidx %u slave_port_idx %u base_idx %u\n",
1327 coreidx, slave_port_idx, base_idx));
1328 return nci->cores[coreidx].desc[slave_port_idx].sp[base_idx].addrl;
1329 }
1330
1331 /* Get the interface descriptor which is connected to APB and return its address */
1332 static uint32
BCMPOSTTRAPFN(nci_get_curmap)1333 BCMPOSTTRAPFN(nci_get_curmap)(nci_info_t *nci, uint coreidx)
1334 {
1335 nci_cores_t *core_info = &nci->cores[coreidx];
1336 uint32 iface_idx;
1337
1338 NCI_TRACE(("nci_get_curmap coreidx %u\n", coreidx));
1339 for (iface_idx = 0; iface_idx < core_info->iface_cnt; iface_idx++) {
1340 NCI_TRACE(("nci_get_curmap iface_idx %u BP_ID %u master %u\n",
1341 iface_idx, ID_BPID(core_info->desc[iface_idx].iface_desc_1),
1342 IS_MASTER(core_info->desc[iface_idx].iface_desc_1)));
1343
1344 /* If core is a Backplane or Bridge, then its slave port
1345 * will give the pointer to access registers.
1346 */
1347 if (!IS_MASTER(core_info->desc[iface_idx].iface_desc_1) &&
1348 (IS_BACKPLANE(core_info->coreinfo) ||
1349 APB_INF(core_info->desc[iface_idx]))) {
1350 return _nci_get_curmap(nci, coreidx, iface_idx, 0);
1351 }
1352 }
1353
1354 /* no valid slave port address is found */
1355 return NCI_BAD_REG;
1356 }
1357
1358 static uint32
BCMPOSTTRAPFN(_nci_get_curwrap)1359 BCMPOSTTRAPFN(_nci_get_curwrap)(nci_info_t *nci, uint coreidx, uint wrapper_idx)
1360 {
1361 return nci->cores[coreidx].desc[wrapper_idx].node_ptr;
1362 }
1363
1364 static uint32
BCMPOSTTRAPFN(nci_get_curwrap)1365 BCMPOSTTRAPFN(nci_get_curwrap)(nci_info_t *nci, uint coreidx)
1366 {
1367 nci_cores_t *core_info = &nci->cores[coreidx];
1368 uint32 iface_idx;
1369 NCI_TRACE(("nci_get_curwrap coreidx %u\n", coreidx));
1370 for (iface_idx = 0; iface_idx < core_info->iface_cnt; iface_idx++) {
1371 NCI_TRACE(("nci_get_curwrap iface_idx %u BP_ID %u master %u\n",
1372 iface_idx, ID_BPID(core_info->desc[iface_idx].iface_desc_1),
1373 IS_MASTER(core_info->desc[iface_idx].iface_desc_1)));
1374 if ((ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_BOOKER) ||
1375 (ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_NIC400)) {
1376 return _nci_get_curwrap(nci, coreidx, iface_idx);
1377 }
1378 }
1379
1380 /* no valid master wrapper found */
1381 return NCI_BAD_REG;
1382 }
1383
1384 static void
_nci_setcoreidx_pcie_bus(si_t * sih,volatile void ** regs,uint32 curmap,uint32 curwrap)1385 _nci_setcoreidx_pcie_bus(si_t *sih, volatile void **regs, uint32 curmap,
1386 uint32 curwrap)
1387 {
1388 si_info_t *sii = SI_INFO(sih);
1389
1390 *regs = sii->curmap;
1391 switch (sii->slice) {
1392 case 0: /* main/first slice */
1393 /* point bar0 window */
1394 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, PCIE_WRITE_SIZE, curmap);
1395 // TODO: why curwrap is zero i.e no master wrapper
1396 if (curwrap != 0) {
1397 if (PCIE_GEN2(sii)) {
1398 OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2,
1399 PCIE_WRITE_SIZE, curwrap);
1400 } else {
1401 OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2,
1402 PCIE_WRITE_SIZE, curwrap);
1403 }
1404 }
1405 break;
1406 case 1: /* aux/second slice */
1407 /* PCIE GEN2 only for other slices */
1408 if (!PCIE_GEN2(sii)) {
1409 /* other slices not supported */
1410 NCI_ERROR(("pci gen not supported for slice 1\n"));
1411 ASSERT(0);
1412 break;
1413 }
1414
1415 /* 0x4000 - 0x4fff: enum space 0x5000 - 0x5fff: wrapper space */
1416
1417 *regs = (volatile uint8 *)*regs + PCI_SEC_BAR0_WIN_OFFSET;
1418 sii->curwrap = (void *)((uintptr)*regs + SI_CORE_SIZE);
1419
1420 /* point bar0 window */
1421 OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN, PCIE_WRITE_SIZE, curmap);
1422 OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_CORE2_WIN2, PCIE_WRITE_SIZE, curwrap);
1423 break;
1424
1425 case 2: /* scan/third slice */
1426 /* PCIE GEN2 only for other slices */
1427 if (!PCIE_GEN2(sii)) {
1428 /* other slices not supported */
1429 NCI_ERROR(("pci gen not supported for slice 1\n"));
1430 ASSERT(0);
1431 break;
1432 }
1433 /* 0x9000 - 0x9fff: enum space 0xa000 - 0xafff: wrapper space */
1434 *regs = (volatile uint8 *)*regs + PCI_SEC_BAR0_WIN_OFFSET;
1435 sii->curwrap = (void *)((uintptr)*regs + SI_CORE_SIZE);
1436
1437 /* point bar0 window */
1438 nci_corereg(sih, sih->buscoreidx, PCIE_TER_BAR0_WIN, ~0, curmap);
1439 nci_corereg(sih, sih->buscoreidx, PCIE_TER_BAR0_WRAPPER, ~0, curwrap);
1440 break;
1441 default:
1442 ASSERT(0);
1443 break;
1444 }
1445 }
1446
/* Select core 'coreidx' as the current core: maps (or re-points) the register
 * and wrapper windows and records the new index in sii->curidx. Returns the
 * core's register base, or NULL for a bad index. Caller must hold interrupts
 * off (asserted below).
 */
static volatile void *
BCMPOSTTRAPFN(_nci_setcoreidx)(si_t *sih, uint coreidx)
{
	si_info_t *sii = SI_INFO(sih);
	nci_info_t *nci = sii->nci_info;
	nci_cores_t *cores_info = &nci->cores[coreidx];
	uint32 curmap, curwrap;
	volatile void *regs = NULL;

	NCI_TRACE(("_nci_setcoreidx coreidx %u\n", coreidx));
	if (!GOODIDX(coreidx, nci->num_cores)) {
		return (NULL);
	}
	/*
	 * If the user has provided an interrupt mask enabled function,
	 * then assert interrupts are disabled before switching the core.
	 */
	ASSERT((sii->intrsenabled_fn == NULL) ||
		!(*(sii)->intrsenabled_fn)((sii)->intr_arg));

	/* NOTE(review): either lookup can return NCI_BAD_REG and is used
	 * unchecked below - TODO confirm this cannot happen for scanned cores.
	 */
	curmap = nci_get_curmap(nci, coreidx);
	curwrap = nci_get_curwrap(nci, coreidx);

	switch (BUSTYPE(sih->bustype)) {
	case SI_BUS:
		/* map if does not exist */
		if (!cores_info->regs) {
			cores_info->regs = REG_MAP(curmap, SI_CORE_SIZE);
			ASSERT(GOODREGS(cores_info->regs));
		}
		sii->curmap = regs = cores_info->regs;
		sii->curwrap = REG_MAP(curwrap, SI_CORE_SIZE);
		break;

	case PCI_BUS:
		_nci_setcoreidx_pcie_bus(sih, &regs, curmap, curwrap);
		break;

	default:
		NCI_ERROR(("_nci_stcoreidx Invalid bustype %d\n", BUSTYPE(sih->bustype)));
		break;
	}
	sii->curidx = coreidx;
	return regs;
}
1492
1493 volatile void *
BCMPOSTTRAPFN(nci_setcoreidx)1494 BCMPOSTTRAPFN(nci_setcoreidx)(si_t *sih, uint coreidx)
1495 {
1496 return _nci_setcoreidx(sih, coreidx);
1497 }
1498
1499 volatile void *
BCMPOSTTRAPFN(nci_setcore)1500 BCMPOSTTRAPFN(nci_setcore)(si_t *sih, uint coreid, uint coreunit)
1501 {
1502 si_info_t *sii = SI_INFO(sih);
1503 nci_info_t *nci = sii->nci_info;
1504 uint core_idx;
1505
1506 NCI_TRACE(("nci_setcore coreidx %u coreunit %u\n", coreid, coreunit));
1507 core_idx = nci_findcoreidx(sih, coreid, coreunit);
1508
1509 if (!GOODIDX(core_idx, nci->num_cores)) {
1510 return (NULL);
1511 }
1512 return nci_setcoreidx(sih, core_idx);
1513 }
1514
1515 /* Get the value of the register at offset "offset" of currently configured core */
1516 uint
BCMPOSTTRAPFN(nci_get_wrap_reg)1517 BCMPOSTTRAPFN(nci_get_wrap_reg)(const si_t *sih, uint32 offset, uint32 mask, uint32 val)
1518 {
1519 const si_info_t *sii = SI_INFO(sih);
1520 uint32 *addr = (uint32 *) ((uchar *)(sii->curwrap) + offset);
1521 NCI_TRACE(("nci_wrap_reg offset %u mask %u val %u\n", offset, mask, val));
1522
1523 if (mask || val) {
1524 uint32 w = R_REG(sii->osh, addr);
1525 w &= ~mask;
1526 w |= val;
1527 W_REG(sii->osh, addr, w);
1528 }
1529 return (R_REG(sii->osh, addr));
1530 }
1531
1532 uint
nci_corevendor(const si_t * sih)1533 nci_corevendor(const si_t *sih)
1534 {
1535 const si_info_t *sii = SI_INFO(sih);
1536 nci_info_t *nci = sii->nci_info;
1537
1538 NCI_TRACE(("nci_corevendor coreidx %u\n", sii->curidx));
1539 return (nci->cores[sii->curidx].coreinfo & COREINFO_MFG_MASK) >> COREINFO_MFG_SHIFT;
1540 }
1541
1542 uint
BCMPOSTTRAPFN(nci_corerev)1543 BCMPOSTTRAPFN(nci_corerev)(const si_t *sih)
1544 {
1545 const si_info_t *sii = SI_INFO(sih);
1546 nci_info_t *nci = sii->nci_info;
1547 uint coreidx = sii->curidx;
1548
1549 NCI_TRACE(("nci_corerev coreidx %u\n", coreidx));
1550
1551 return (nci->cores[coreidx].coreinfo & COREINFO_REV_MASK) >> COREINFO_REV_SHIFT;
1552 }
1553
1554 uint
nci_corerev_minor(const si_t * sih)1555 nci_corerev_minor(const si_t *sih)
1556 {
1557 return (nci_core_sflags(sih, 0, 0) >> SISF_MINORREV_D11_SHIFT) &
1558 SISF_MINORREV_D11_MASK;
1559 }
1560
1561 uint
BCMPOSTTRAPFN(nci_coreid)1562 BCMPOSTTRAPFN(nci_coreid)(const si_t *sih, uint coreidx)
1563 {
1564 const si_info_t *sii = SI_INFO(sih);
1565 nci_info_t *nci = sii->nci_info;
1566
1567 NCI_TRACE(("nci_coreid coreidx %u\n", coreidx));
1568 return nci->cores[coreidx].coreid;
1569 }
1570
1571 /** return total coreunit of coreid or zero if not found */
1572 uint
BCMPOSTTRAPFN(nci_numcoreunits)1573 BCMPOSTTRAPFN(nci_numcoreunits)(const si_t *sih, uint coreid)
1574 {
1575 const si_info_t *sii = SI_INFO(sih);
1576 nci_info_t *nci = sii->nci_info;
1577 uint found = 0;
1578 uint i;
1579
1580 NCI_TRACE(("nci_numcoreunits coreidx %u\n", coreid));
1581
1582 for (i = 0; i < nci->num_cores; i++) {
1583 if (nci->cores[i].coreid == coreid) {
1584 found++;
1585 }
1586 }
1587
1588 return found;
1589 }
1590
1591 /* Return the address of the nth address space in the current core
1592 * Arguments:
1593 * sih : Pointer to struct si_t
1594 * spidx : slave port index
1595 * baidx : base address index
1596 */
1597 uint32
nci_addr_space(const si_t * sih,uint spidx,uint baidx)1598 nci_addr_space(const si_t *sih, uint spidx, uint baidx)
1599 {
1600 const si_info_t *sii = SI_INFO(sih);
1601 uint cidx;
1602
1603 NCI_TRACE(("nci_addr_space spidx %u baidx %u\n", spidx, baidx));
1604 cidx = sii->curidx;
1605 return _nci_get_curmap(sii->nci_info, cidx, spidx, baidx);
1606 }
1607
1608 /* Return the size of the nth address space in the current core
1609 * Arguments:
1610 * sih : Pointer to struct si_t
1611 * spidx : slave port index
1612 * baidx : base address index
1613 */
1614 uint32
nci_addr_space_size(const si_t * sih,uint spidx,uint baidx)1615 nci_addr_space_size(const si_t *sih, uint spidx, uint baidx)
1616 {
1617 const si_info_t *sii = SI_INFO(sih);
1618 uint cidx;
1619
1620 NCI_TRACE(("nci_addr_space_size spidx %u baidx %u\n", spidx, baidx));
1621
1622 cidx = sii->curidx;
1623 return _nci_get_slave_addr_size(sii->nci_info, cidx, spidx, baidx);
1624 }
1625
1626 /*
1627 * Performs soft reset of attached device.
1628 * Writes have the following effect:
1629 * 0b1 Request attached device to enter reset.
1630 * Write is ignored if it occurs before soft reset exit has occurred.
1631 *
1632 * 0b0 Request attached device to exit reset.
1633 * Write is ignored if it occurs before soft reset entry has occurred.
1634 *
1635 * Software can poll this register to determine whether soft reset entry or exit has occurred,
1636 * using the following values:
1637 * 0b1 Indicates that the device is in reset.
1638 * 0b0 Indicates that the device is not in reset.
1639 *
1640 *
1641 * Note
1642 * The register value updates to reflect a request for reset entry or reset exit,
1643 * but the update can only occur after required internal conditions are met.
1644 * Until these conditions are met, a read to the register returns the old value.
1645 * For example, outstanding transactions currently being handled must complete before
1646 * the register value updates.
1647 *
1648 * To ensure reset propagation within the device,
1649 * it is the responsibility of software to allow enough cycles after
1650 * soft reset assertion is reflected in the reset control register
1651 * before exiting soft reset by triggering a write of 0b0.
1652 * If this responsibility is not met, the behavior is undefined or unpredictable.
1653 *
1654 * When the register value is 0b1,
1655 * the external soft reset pin that connects to the attached AXI master or slave
1656 * device is asserted, using the correct polarity of the reset pin.
1657 * When the register value is 0b0, the external softreset
1658 * pin that connects to the attached AXI master or slave device is deasserted,
1659 * using the correct polarity of the reset pin.
1660 */
static void
BCMPOSTTRAPFN(_nci_core_reset)(const si_t *sih, uint32 bits, uint32 resetbits)
{
	const si_info_t *sii = SI_INFO(sih);
	nci_info_t *nci = sii->nci_info;
	/* sii->curwrap points at the AMNI wrapper of the currently selected core */
	amni_regs_t *amni = (amni_regs_t *)(uintptr)sii->curwrap;
	volatile dmp_regs_t *io;
	volatile uint32* erom_base = 0u;
	uint32 orig_bar0_win1 = 0u;
	volatile uint32 dummy;
	volatile uint32 reg_read;
	uint32 dmp_write_value;

	/* Point to OOBR base */
	switch (BUSTYPE(sih->bustype)) {
	case SI_BUS:
		erom_base = (volatile uint32*)REG_MAP(GET_OOBR_BASE(nci->cc_erom2base),
			SI_CORE_SIZE);
		break;

	case PCI_BUS:
		/*
		 * Save Original Bar0 Win1. In nci, the io registers dmpctrl & dmpstatus
		 * registers are implemented in the EROM section. REF -
		 * https://docs.google.com/document/d/1HE7hAmvdoNFSnMI7MKQV1qVrFBZVsgLdNcILNOA2C8c
		 * This requires addition BAR0 windows mapping to erom section in chipcommon.
		 */
		orig_bar0_win1 = OSL_PCI_READ_CONFIG(nci->osh, PCI_BAR0_WIN,
			PCI_ACCESS_SIZE);

		OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
			GET_OOBR_BASE(nci->cc_erom2base));
		erom_base = (volatile uint32*)sii->curmap;
		break;

	default:
		/* NOTE(review): erom_base stays 0 here and is dereferenced below -
		 * presumably unreachable for supported bus types; confirm.
		 */
		NCI_ERROR(("_nci_core_reset Invalid bustype %d\n", BUSTYPE(sih->bustype)));
		break;
	}

	/* Point to DMP Control */
	io = (dmp_regs_t*)(NCI_ADD_ADDR(erom_base, nci->cores[sii->curidx].dmp_regs_off));

	NCI_TRACE(("_nci_core_reset reg 0x%p io %p\n", amni, io));

	/* Put core into reset */
	W_REG(nci->osh, &amni->idm_reset_ctrl, NI_IDM_RESET_ENTRY);

	/* poll for the reset to happen */
	/* NOTE(review): these polls re-arm SPINWAIT every iteration, so they
	 * spin forever if the hardware never reflects the request - confirm
	 * this is the intended failure mode.
	 */
	while (TRUE) {
		/* Wait until reset is effective */
		SPINWAIT(((reg_read = R_REG(nci->osh, &amni->idm_reset_ctrl)) !=
			NI_IDM_RESET_ENTRY), NCI_SPINWAIT_TIMEOUT);

		if (reg_read == NI_IDM_RESET_ENTRY) {
			break;
		}
	}

	/* Assert the caller's ioctrl bits with clocks forced on while in reset */
	dmp_write_value = (bits | resetbits | SICF_FGC | SICF_CLOCK_EN);

	W_REG(nci->osh, &io->dmpctrl, dmp_write_value);

	/* poll for the dmp_reg write to happen */
	while (TRUE) {
		/* Wait until reset is effective */
		SPINWAIT(((reg_read = R_REG(nci->osh, &io->dmpctrl)) !=
			dmp_write_value), NCI_SPINWAIT_TIMEOUT);
		if (reg_read == dmp_write_value) {
			break;
		}
	}

	/* take core out of reset */
	W_REG(nci->osh, &amni->idm_reset_ctrl, 0u);

	/* poll for the core to come out of reset */
	while (TRUE) {
		/* Wait until reset is effected */
		SPINWAIT(((reg_read = R_REG(nci->osh, &amni->idm_reset_ctrl)) !=
			NI_IDM_RESET_EXIT), NCI_SPINWAIT_TIMEOUT);
		if (reg_read == NI_IDM_RESET_EXIT) {
			break;
		}
	}

	/* Drop the force-gated-clock bit; leave the core clock enabled */
	dmp_write_value = (bits | SICF_CLOCK_EN);
	W_REG(nci->osh, &io->dmpctrl, (bits | SICF_CLOCK_EN));
	/* poll for the core to come out of reset */
	while (TRUE) {
		SPINWAIT(((reg_read = R_REG(nci->osh, &io->dmpctrl)) !=
			dmp_write_value), NCI_SPINWAIT_TIMEOUT);
		if (reg_read == dmp_write_value) {
			break;
		}
	}

	/* dummy read flushes the final write */
	dummy = R_REG(nci->osh, &io->dmpctrl);
	BCM_REFERENCE(dummy);

	/* Point back to original base */
	if (BUSTYPE(sih->bustype) == PCI_BUS) {
		OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE, orig_bar0_win1);
	}
}
1766
/* reset and re-enable a core
 * 'bits'/'resetbits' are SICF_* ioctrl flags forwarded to the reset sequence.
 * Walks the current core's interfaces from last to first and resets through
 * every BOOKER/NIC-400 wrapper interface found.
 */
void
BCMPOSTTRAPFN(nci_core_reset)(const si_t *sih, uint32 bits, uint32 resetbits)
{
	const si_info_t *sii = SI_INFO(sih);
	int32 iface_idx = 0u;
	nci_info_t *nci = sii->nci_info;
	nci_cores_t *core = &nci->cores[sii->curidx];

	/* If Wrapper is of NIC400, then call AI functionality */
	for (iface_idx = core->iface_cnt-1; iface_idx >= 0; iface_idx--) {
		/* skip interfaces that are neither BOOKER nor NIC-400 wrappers */
		if (!(BOOKER_INF(core->desc[iface_idx]) || NIC_INF(core->desc[iface_idx]))) {
			continue;
		}
#ifdef BOOKER_NIC400_INF
		if (core->desc[iface_idx].node_type == NODE_TYPE_NIC400) {
			/* NIC-400 wrappers use the legacy AI reset path */
			ai_core_reset_ext(sih, bits, resetbits);
		} else
#endif /* BOOKER_NIC400_INF */
		{
			_nci_core_reset(sih, bits, resetbits);
		}
	}
}
1792
#ifdef BOOKER_NIC400_INF
/* Index of the first BOOKER or NIC-400 backplane interface of core 'coreidx',
 * or NCI_BAD_INDEX when the core has no such wrapper interface.
 */
static int32
BCMPOSTTRAPFN(nci_find_first_wrapper_idx)(nci_info_t *nci, uint32 coreidx)
{
	nci_cores_t *core = &nci->cores[coreidx];
	uint32 i;

	NCI_TRACE(("nci_find_first_wrapper_idx %u\n", coreidx));

	for (i = 0; i < core->iface_cnt; i++) {
		uint32 bpid = ID_BPID(core->desc[i].iface_desc_1);

		NCI_INFO(("nci_find_first_wrapper_idx: %u BP_ID %u master %u\n",
			i, bpid, IS_MASTER(core->desc[i].iface_desc_1)));

		if ((bpid == BP_BOOKER) || (bpid == BP_NIC400)) {
			return (int32)i;
		}
	}

	/* no valid master wrapper found */
	return NCI_BAD_INDEX;
}
#endif /* BOOKER_NIC400_INF */
1817
/* Hold the currently selected core in reset and program its DMP ioctrl with
 * 'bits' (plus forced clocks). No-op if the core is already in reset.
 */
void
nci_core_disable(const si_t *sih, uint32 bits)
{
	const si_info_t *sii = SI_INFO(sih);
	nci_info_t *nci = sii->nci_info;
	uint32 reg_read;
	volatile dmp_regs_t *io = NULL;
	uint32 orig_bar0_win1 = 0u;
	uint32 dmp_write_value;
	amni_regs_t *amni = (amni_regs_t *)(uintptr)sii->curwrap;
	nci_cores_t *core = &nci->cores[sii->curidx];
	int32 iface_idx;

	NCI_TRACE(("nci_core_disable\n"));

	/* silence unused-variable warnings when BOOKER_NIC400_INF is undefined */
	BCM_REFERENCE(core);
	BCM_REFERENCE(iface_idx);

#ifdef BOOKER_NIC400_INF
	iface_idx = nci_find_first_wrapper_idx(nci, sii->curidx);

	if (iface_idx < 0) {
		NCI_ERROR(("nci_core_disable: First Wrapper is not found\n"));
		ASSERT(0u);
		return;
	}

	/* If Wrapper is of NIC400, then call AI functionality */
	if (core->desc[iface_idx].master && (core->desc[iface_idx].node_type == NODE_TYPE_NIC400)) {
		return ai_core_disable(sih, bits);
	}
#endif /* BOOKER_NIC400_INF */

	ASSERT(GOODREGS(sii->curwrap));
	reg_read = R_REG(nci->osh, &amni->idm_reset_ctrl);

	/* if core is already in reset, just return */
	if (reg_read == NI_IDM_RESET_ENTRY) {
		return;
	}

	/* Put core into reset */
	W_REG(nci->osh, &amni->idm_reset_ctrl, NI_IDM_RESET_ENTRY);
	/* NOTE(review): this poll re-arms SPINWAIT each iteration and spins
	 * forever if reset entry is never reported - confirm intended.
	 */
	while (TRUE) {
		/* Wait until reset is effected */
		SPINWAIT(((reg_read = R_REG(nci->osh, &amni->idm_reset_ctrl)) !=
			NI_IDM_RESET_ENTRY), NCI_SPINWAIT_TIMEOUT);
		if (reg_read == NI_IDM_RESET_ENTRY) {
			break;
		}
	}

	/* Point to OOBR base */
	switch (BUSTYPE(sih->bustype)) {
	case SI_BUS:
		io = (volatile dmp_regs_t*)
			REG_MAP(GET_OOBR_BASE(nci->cc_erom2base), SI_CORE_SIZE);
		break;

	case PCI_BUS:
		/* Save Original Bar0 Win1 */
		orig_bar0_win1 =
			OSL_PCI_READ_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE);

		OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
			GET_OOBR_BASE(nci->cc_erom2base));
		io = (volatile dmp_regs_t*)sii->curmap;
		break;

	default:
		/* NOTE(review): io stays NULL here and is offset/dereferenced
		 * below - presumably unreachable for supported bus types; confirm.
		 */
		NCI_ERROR(("nci_core_disable Invalid bustype %d\n", BUSTYPE(sih->bustype)));
		break;

	}

	/* Point to DMP Control */
	io = (dmp_regs_t*)(NCI_ADD_ADDR(io, nci->cores[sii->curidx].dmp_regs_off));

	/* keep clocks forced on while the core is held in reset */
	dmp_write_value = (bits | SICF_FGC | SICF_CLOCK_EN);
	W_REG(nci->osh, &io->dmpctrl, dmp_write_value);

	/* poll for the dmp_reg write to happen */
	while (TRUE) {
		/* Wait until reset is effected */
		SPINWAIT(((reg_read = R_REG(nci->osh, &io->dmpctrl)) != dmp_write_value),
			NCI_SPINWAIT_TIMEOUT);
		if (reg_read == dmp_write_value) {
			break;
		}
	}

	/* Point back to original base */
	if (BUSTYPE(sih->bustype) == PCI_BUS) {
		OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE, orig_bar0_win1);
	}
}
1914
1915 bool
BCMPOSTTRAPFN(nci_iscoreup)1916 BCMPOSTTRAPFN(nci_iscoreup)(const si_t *sih)
1917 {
1918 const si_info_t *sii = SI_INFO(sih);
1919 nci_info_t *nci = sii->nci_info;
1920 amni_regs_t *ni = (amni_regs_t *)(uintptr)sii->curwrap;
1921 uint32 reset_ctrl;
1922
1923 #ifdef BOOKER_NIC400_INF
1924 nci_cores_t *core = &nci->cores[sii->curidx];
1925 int32 iface_idx = nci_find_first_wrapper_idx(nci, sii->curidx);
1926
1927 if (iface_idx < 0) {
1928 NCI_ERROR(("nci_iscoreup: First Wrapper is not found\n"));
1929 ASSERT(0u);
1930 return FALSE;
1931 }
1932
1933 /* If Wrapper is of NIC400, then call AI functionality */
1934 if (core->desc[iface_idx].master && (core->desc[iface_idx].node_type == NODE_TYPE_NIC400)) {
1935 return ai_iscoreup(sih);
1936 }
1937 #endif /* BOOKER_NIC400_INF */
1938
1939 NCI_TRACE(("nci_iscoreup\n"));
1940 reset_ctrl = R_REG(nci->osh, &ni->idm_reset_ctrl);
1941
1942 return (reset_ctrl == NI_IDM_RESET_ENTRY) ? FALSE : TRUE;
1943 }
1944
1945 /* TODO: OOB Router core is not available. Can be removed. */
1946 uint
nci_intflag(si_t * sih)1947 nci_intflag(si_t *sih)
1948 {
1949 return 0;
1950 }
1951
1952 uint
nci_flag(si_t * sih)1953 nci_flag(si_t *sih)
1954 {
1955 /* TODO: will be implemented if required for NCI */
1956 return 0;
1957 }
1958
1959 uint
nci_flag_alt(const si_t * sih)1960 nci_flag_alt(const si_t *sih)
1961 {
1962 /* TODO: will be implemented if required for NCI */
1963 return 0;
1964 }
1965
void
BCMATTACHFN(nci_setint)(const si_t *sih, int siflag)
{
	/* Stub: interrupt mask programming is not yet implemented for NCI. */
	BCM_REFERENCE(sih);
	BCM_REFERENCE(siflag);

	/* TODO: Figure out how to set interrupt mask in nci */
}
1974
1975 /* TODO: OOB Router core is not available. Can we remove or need an alternate implementation. */
1976 uint32
nci_oobr_baseaddr(const si_t * sih,bool second)1977 nci_oobr_baseaddr(const si_t *sih, bool second)
1978 {
1979 return 0;
1980 }
1981
1982 uint
nci_coreunit(const si_t * sih)1983 nci_coreunit(const si_t *sih)
1984 {
1985 const si_info_t *sii = SI_INFO(sih);
1986 nci_info_t *nci = sii->nci_info;
1987 nci_cores_t *cores = nci->cores;
1988 uint idx;
1989 uint coreid;
1990 uint coreunit;
1991 uint i;
1992
1993 coreunit = 0;
1994
1995 idx = sii->curidx;
1996
1997 ASSERT(GOODREGS(sii->curmap));
1998 coreid = nci_coreid(sih, sii->curidx);
1999
2000 /* count the cores of our type */
2001 for (i = 0; i < idx; i++) {
2002 if (cores[i].coreid == coreid) {
2003 coreunit++;
2004 }
2005 }
2006
2007 return (coreunit);
2008 }
2009
2010 uint
nci_corelist(const si_t * sih,uint coreid[])2011 nci_corelist(const si_t *sih, uint coreid[])
2012 {
2013 const si_info_t *sii = SI_INFO(sih);
2014 nci_info_t *nci = sii->nci_info;
2015 nci_cores_t *cores = nci->cores;
2016 uint32 i;
2017
2018 for (i = 0; i < sii->numcores; i++) {
2019 coreid[i] = cores[i].coreid;
2020 }
2021
2022 return (sii->numcores);
2023 }
2024
2025 /* Return the number of address spaces in current core */
2026 int
BCMATTACHFN(nci_numaddrspaces)2027 BCMATTACHFN(nci_numaddrspaces)(const si_t *sih)
2028 {
2029 /* TODO: Either save it or parse the EROM on demand, currently hardcode 2 */
2030 BCM_REFERENCE(sih);
2031
2032 return 2;
2033 }
2034
/* The value of wrap_pos should be greater than 0 */
/* wrapba, wrapba2 and wrapba3 */
/*
 * Return the node pointer (wrapper base address) of the wrap_pos'th BOOKER
 * master interface of the current core, or 0 if there is no such wrapper.
 * NOTE(review): despite the comment above, wrap_pos is effectively 0-based
 * here — wrap_pos == 0 selects the first matching wrapper.
 */
uint32
nci_get_nth_wrapper(const si_t *sih, int32 wrap_pos)
{
	const si_info_t *sii = SI_INFO(sih);
	nci_info_t *nci = sii->nci_info;
	const nci_cores_t *core_info = &nci->cores[sii->curidx];
	uint32 iface_idx;
	uint32 addr = 0;

	ASSERT(wrap_pos >= 0);
	if (wrap_pos < 0) {
		return addr;
	}

	NCI_TRACE(("nci_get_curmap coreidx %u\n", sii->curidx));
	/* Scan this core's interface descriptors for BOOKER master interfaces,
	 * counting down wrap_pos until the requested one is reached.
	 */
	for (iface_idx = 0; iface_idx < core_info->iface_cnt; iface_idx++) {
		NCI_TRACE(("nci_get_curmap iface_idx %u BP_ID %u master %u\n",
			iface_idx, ID_BPID(core_info->desc[iface_idx].iface_desc_1),
			IS_MASTER(core_info->desc[iface_idx].iface_desc_1)));
		/* hack for core idx 8, coreidx without APB Backplane ID */
		if (!IS_MASTER(core_info->desc[iface_idx].iface_desc_1)) {
			continue;
		}
		/* TODO: Should the interface be only BOOKER or NIC is also fine. */
		if (GET_NODETYPE(core_info->desc[iface_idx].iface_desc_0) != NODE_TYPE_BOOKER) {
			continue;
		}
		/* Iterate till we do not get a wrapper at nth (wrap_pos) position */
		if (wrap_pos == 0) {
			break;
		}
		wrap_pos--;
	}
	/* If the loop stopped on a matching interface, extract its node pointer */
	if (iface_idx < core_info->iface_cnt) {
		addr = GET_NODEPTR(core_info->desc[iface_idx].iface_desc_0);
	}
	return addr;
}
2075
2076 /* Get slave port address of the 0th slave (csp2ba) */
2077 uint32
nci_get_axi_addr(const si_t * sih,uint32 * size)2078 nci_get_axi_addr(const si_t *sih, uint32 *size)
2079 {
2080 const si_info_t *sii = SI_INFO(sih);
2081 nci_info_t *nci = sii->nci_info;
2082 const nci_cores_t *core_info = (const nci_cores_t *)&nci->cores[sii->curidx];
2083 uint32 iface_idx;
2084 uint32 addr = 0;
2085
2086 NCI_TRACE(("nci_get_curmap coreidx %u\n", sii->curidx));
2087 for (iface_idx = 0; iface_idx < core_info->iface_cnt; iface_idx++) {
2088 NCI_TRACE(("nci_get_curmap iface_idx %u BP_ID %u master %u\n",
2089 iface_idx, ID_BPID(core_info->desc[iface_idx].iface_desc_1),
2090 IS_MASTER(core_info->desc[iface_idx].iface_desc_1)));
2091 if (IS_MASTER(core_info->desc[iface_idx].iface_desc_1)) {
2092 continue;
2093 }
2094 if ((ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_BOOKER) ||
2095 (ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_NIC400)) {
2096 break;
2097 }
2098 }
2099 if (iface_idx < core_info->iface_cnt) {
2100 /*
2101 * TODO: Is there any case where we need to return the slave port address
2102 * corresponding to index other than 0.
2103 */
2104 if (&core_info->desc[iface_idx].sp[0] != NULL) {
2105 addr = core_info->desc[iface_idx].sp[0].addrl;
2106 if (size) {
2107 uint32 adesc = core_info->desc[iface_idx].sp[0].adesc;
2108 *size = SLAVEPORT_ADDR_SIZE(adesc);
2109 }
2110 }
2111 }
2112 return addr;
2113 }
2114
2115 /* spidx shouldbe the index of the slave port which we are expecting.
2116 * The value will vary from 0 to num_addr_reg.
2117 */
2118 /* coresba and coresba2 */
2119 uint32
nci_get_core_baaddr(const si_t * sih,uint32 * size,int32 baidx)2120 nci_get_core_baaddr(const si_t *sih, uint32 *size, int32 baidx)
2121 {
2122 const si_info_t *sii = SI_INFO(sih);
2123 nci_info_t *nci = sii->nci_info;
2124 const nci_cores_t *core_info = (const nci_cores_t *)&nci->cores[sii->curidx];
2125 uint32 iface_idx;
2126 uint32 addr = 0;
2127
2128 NCI_TRACE(("nci_get_curmap coreidx %u\n", sii->curidx));
2129 for (iface_idx = 0; iface_idx < core_info->iface_cnt; iface_idx++) {
2130 NCI_TRACE(("nci_get_curmap iface_idx %u BP_ID %u master %u\n",
2131 iface_idx, ID_BPID(core_info->desc[iface_idx].iface_desc_1),
2132 IS_MASTER(core_info->desc[iface_idx].iface_desc_1)));
2133 /* hack for core idx 8, coreidx without APB Backplane ID */
2134 if (IS_MASTER(core_info->desc[iface_idx].iface_desc_1)) {
2135 continue;
2136 }
2137 if ((ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_APB1) ||
2138 (ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_APB2)) {
2139 break;
2140 }
2141 }
2142 if (iface_idx < core_info->iface_cnt) {
2143 /*
2144 * TODO: Is there any case where we need to return the slave port address
2145 * corresponding to index other than 0.
2146 */
2147 if ((core_info->desc[iface_idx].num_addr_reg > baidx) &&
2148 (&core_info->desc[iface_idx].sp[baidx] != NULL)) {
2149 addr = core_info->desc[iface_idx].sp[baidx].addrl;
2150 if (size) {
2151 uint32 adesc = core_info->desc[iface_idx].sp[0].adesc;
2152 *size = SLAVEPORT_ADDR_SIZE(adesc);
2153 }
2154 }
2155 }
2156 return addr;
2157 }
2158
2159 uint32
nci_addrspace(const si_t * sih,uint spidx,uint baidx)2160 nci_addrspace(const si_t *sih, uint spidx, uint baidx)
2161 {
2162 if (spidx == CORE_SLAVE_PORT_0) {
2163 if (baidx == CORE_BASE_ADDR_0) {
2164 return nci_get_core_baaddr(sih, NULL, CORE_BASE_ADDR_0);
2165 } else if (baidx == CORE_BASE_ADDR_1) {
2166 return nci_get_core_baaddr(sih, NULL, CORE_BASE_ADDR_1);
2167 }
2168 } else if (spidx == CORE_SLAVE_PORT_1) {
2169 if (baidx == CORE_BASE_ADDR_0) {
2170 return nci_get_axi_addr(sih, NULL);
2171 }
2172 }
2173
2174 SI_ERROR(("nci_addrspace: Need to parse the erom again to find %d base addr"
2175 " in %d slave port\n", baidx, spidx));
2176
2177 return 0;
2178 }
2179
2180 uint32
BCMATTACHFN(nci_addrspacesize)2181 BCMATTACHFN(nci_addrspacesize)(const si_t *sih, uint spidx, uint baidx)
2182 {
2183 uint32 size = 0;
2184
2185 if (spidx == CORE_SLAVE_PORT_0) {
2186 if (baidx == CORE_BASE_ADDR_0) {
2187 nci_get_core_baaddr(sih, &size, CORE_BASE_ADDR_0);
2188 goto done;
2189 } else if (baidx == CORE_BASE_ADDR_1) {
2190 nci_get_core_baaddr(sih, &size, CORE_BASE_ADDR_1);
2191 goto done;
2192 }
2193 } else if (spidx == CORE_SLAVE_PORT_1) {
2194 if (baidx == CORE_BASE_ADDR_0) {
2195 nci_get_axi_addr(sih, &size);
2196 goto done;
2197 }
2198 }
2199
2200 SI_ERROR(("nci_addrspacesize: Need to parse the erom again to find %d"
2201 " base addr in %d slave port\n", baidx, spidx));
2202 done:
2203 return size;
2204 }
2205
2206 uint32
BCMPOSTTRAPFN(nci_core_cflags)2207 BCMPOSTTRAPFN(nci_core_cflags)(const si_t *sih, uint32 mask, uint32 val)
2208 {
2209 const si_info_t *sii = SI_INFO(sih);
2210 nci_info_t *nci = sii->nci_info;
2211 nci_cores_t *core = &nci->cores[sii->curidx];
2212 uint32 orig_bar0_win1 = 0;
2213 int32 iface_idx;
2214 uint32 w;
2215
2216 BCM_REFERENCE(iface_idx);
2217
2218 if ((core[sii->curidx].coreid) == PMU_CORE_ID) {
2219 NCI_ERROR(("nci_core_cflags: Accessing PMU DMP register (ioctrl)\n"));
2220 return 0;
2221 }
2222
2223 ASSERT(GOODREGS(sii->curwrap));
2224 ASSERT((val & ~mask) == 0);
2225
2226 #ifdef BOOKER_NIC400_INF
2227 iface_idx = nci_find_first_wrapper_idx(nci, sii->curidx);
2228 if (iface_idx < 0) {
2229 NCI_ERROR(("nci_core_cflags: First Wrapper is not found\n"));
2230 ASSERT(0u);
2231 return 0u;
2232 }
2233
2234 /* If Wrapper is of NIC400, then call AI functionality */
2235 if (core->desc[iface_idx].master && (core->desc[iface_idx].node_type == NODE_TYPE_NIC400)) {
2236 aidmp_t *ai = sii->curwrap;
2237
2238 if (mask || val) {
2239 w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
2240 W_REG(sii->osh, &ai->ioctrl, w);
2241 }
2242 return R_REG(sii->osh, &ai->ioctrl);
2243 } else
2244 #endif /* BOOKER_NIC400_INF */
2245 {
2246 volatile dmp_regs_t *io = sii->curwrap;
2247 volatile uint32 reg_read;
2248
2249 /* BOOKER */
2250 /* Point to OOBR base */
2251 switch (BUSTYPE(sih->bustype)) {
2252 case SI_BUS:
2253 io = (volatile dmp_regs_t*)
2254 REG_MAP(GET_OOBR_BASE(nci->cc_erom2base), SI_CORE_SIZE);
2255 break;
2256
2257 case PCI_BUS:
2258 /* Save Original Bar0 Win1 */
2259 orig_bar0_win1 =
2260 OSL_PCI_READ_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE);
2261
2262 OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
2263 GET_OOBR_BASE(nci->cc_erom2base));
2264 io = (volatile dmp_regs_t*)sii->curmap;
2265 break;
2266
2267 default:
2268 NCI_ERROR(("nci_core_cflags Invalid bustype %d\n", BUSTYPE(sih->bustype)));
2269 break;
2270
2271 }
2272
2273 /* Point to DMP Control */
2274 io = (dmp_regs_t*)(NCI_ADD_ADDR(io, nci->cores[sii->curidx].dmp_regs_off));
2275
2276 if (mask || val) {
2277 w = ((R_REG(sii->osh, &io->dmpctrl) & ~mask) | val);
2278 W_REG(sii->osh, &io->dmpctrl, w);
2279 }
2280
2281 reg_read = R_REG(sii->osh, &io->dmpctrl);
2282
2283 /* Point back to original base */
2284 if (BUSTYPE(sih->bustype) == PCI_BUS) {
2285 OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN,
2286 PCI_ACCESS_SIZE, orig_bar0_win1);
2287 }
2288
2289 return reg_read;
2290 }
2291 }
2292
2293 void
BCMPOSTTRAPFN(nci_core_cflags_wo)2294 BCMPOSTTRAPFN(nci_core_cflags_wo)(const si_t *sih, uint32 mask, uint32 val)
2295 {
2296 const si_info_t *sii = SI_INFO(sih);
2297 nci_info_t *nci = sii->nci_info;
2298 nci_cores_t *core = &nci->cores[sii->curidx];
2299 volatile dmp_regs_t *io = sii->curwrap;
2300 uint32 orig_bar0_win1 = 0;
2301 int32 iface_idx;
2302 uint32 w;
2303
2304 BCM_REFERENCE(iface_idx);
2305
2306 if ((core[sii->curidx].coreid) == PMU_CORE_ID) {
2307 NCI_ERROR(("nci_core_cflags: Accessing PMU DMP register (ioctrl)\n"));
2308 return;
2309 }
2310
2311 ASSERT(GOODREGS(sii->curwrap));
2312 ASSERT((val & ~mask) == 0);
2313
2314 #ifdef BOOKER_NIC400_INF
2315 iface_idx = nci_find_first_wrapper_idx(nci, sii->curidx);
2316 if (iface_idx < 0) {
2317 NCI_ERROR(("nci_core_cflags_wo: First Wrapper is not found\n"));
2318 ASSERT(0u);
2319 return;
2320 }
2321
2322 /* If Wrapper is of NIC400, then call AI functionality */
2323 if (core->desc[iface_idx].master && (core->desc[iface_idx].node_type == NODE_TYPE_NIC400)) {
2324 aidmp_t *ai = sii->curwrap;
2325 if (mask || val) {
2326 w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
2327 W_REG(sii->osh, &ai->ioctrl, w);
2328 }
2329 } else
2330 #endif /* BOOKER_NIC400_INF */
2331 {
2332 /* BOOKER */
2333 /* Point to OOBR base */
2334 switch (BUSTYPE(sih->bustype)) {
2335 case SI_BUS:
2336 io = (volatile dmp_regs_t*)
2337 REG_MAP(GET_OOBR_BASE(nci->cc_erom2base), SI_CORE_SIZE);
2338 break;
2339
2340 case PCI_BUS:
2341 /* Save Original Bar0 Win1 */
2342 orig_bar0_win1 =
2343 OSL_PCI_READ_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE);
2344
2345 OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
2346 GET_OOBR_BASE(nci->cc_erom2base));
2347 io = (volatile dmp_regs_t*)sii->curmap;
2348 break;
2349
2350 default:
2351 NCI_ERROR(("nci_core_cflags_wo Invalid bustype %d\n",
2352 BUSTYPE(sih->bustype)));
2353 break;
2354 }
2355
2356 /* Point to DMP Control */
2357 io = (dmp_regs_t*)(NCI_ADD_ADDR(io, nci->cores[sii->curidx].dmp_regs_off));
2358
2359 if (mask || val) {
2360 w = ((R_REG(sii->osh, &io->dmpctrl) & ~mask) | val);
2361 W_REG(sii->osh, &io->dmpctrl, w);
2362 }
2363
2364 /* Point back to original base */
2365 if (BUSTYPE(sih->bustype) == PCI_BUS) {
2366 OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN,
2367 PCI_ACCESS_SIZE, orig_bar0_win1);
2368 }
2369 }
2370 }
2371
2372 uint32
nci_core_sflags(const si_t * sih,uint32 mask,uint32 val)2373 nci_core_sflags(const si_t *sih, uint32 mask, uint32 val)
2374 {
2375 const si_info_t *sii = SI_INFO(sih);
2376 nci_info_t *nci = sii->nci_info;
2377 nci_cores_t *core = &nci->cores[sii->curidx];
2378 uint32 orig_bar0_win1 = 0;
2379 int32 iface_idx;
2380 uint32 w;
2381
2382 BCM_REFERENCE(iface_idx);
2383
2384 if ((core[sii->curidx].coreid) == PMU_CORE_ID) {
2385 NCI_ERROR(("nci_core_sflags: Accessing PMU DMP register (ioctrl)\n"));
2386 return 0;
2387 }
2388
2389 ASSERT(GOODREGS(sii->curwrap));
2390
2391 ASSERT((val & ~mask) == 0);
2392 ASSERT((mask & ~SISF_CORE_BITS) == 0);
2393
2394 #ifdef BOOKER_NIC400_INF
2395 iface_idx = nci_find_first_wrapper_idx(nci, sii->curidx);
2396 if (iface_idx < 0) {
2397 NCI_ERROR(("nci_core_sflags: First Wrapper is not found\n"));
2398 ASSERT(0u);
2399 return 0u;
2400 }
2401
2402 /* If Wrapper is of NIC400, then call AI functionality */
2403 if (core->desc[iface_idx].master && (core->desc[iface_idx].node_type == NODE_TYPE_NIC400)) {
2404 aidmp_t *ai = sii->curwrap;
2405 if (mask || val) {
2406 w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
2407 W_REG(sii->osh, &ai->iostatus, w);
2408 }
2409
2410 return R_REG(sii->osh, &ai->iostatus);
2411 } else
2412 #endif /* BOOKER_NIC400_INF */
2413 {
2414 volatile dmp_regs_t *io = sii->curwrap;
2415 volatile uint32 reg_read;
2416
2417 /* BOOKER */
2418 /* Point to OOBR base */
2419 switch (BUSTYPE(sih->bustype)) {
2420 case SI_BUS:
2421 io = (volatile dmp_regs_t*)
2422 REG_MAP(GET_OOBR_BASE(nci->cc_erom2base), SI_CORE_SIZE);
2423 break;
2424
2425 case PCI_BUS:
2426 /* Save Original Bar0 Win1 */
2427 orig_bar0_win1 =
2428 OSL_PCI_READ_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE);
2429
2430 OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN, PCI_ACCESS_SIZE,
2431 GET_OOBR_BASE(nci->cc_erom2base));
2432 io = (volatile dmp_regs_t*)sii->curmap;
2433 break;
2434
2435 default:
2436 NCI_ERROR(("nci_core_sflags Invalid bustype %d\n", BUSTYPE(sih->bustype)));
2437 return 0u;
2438 }
2439
2440 /* Point to DMP Control */
2441 io = (dmp_regs_t*)(NCI_ADD_ADDR(io, nci->cores[sii->curidx].dmp_regs_off));
2442
2443 if (mask || val) {
2444 w = ((R_REG(sii->osh, &io->dmpstatus) & ~mask) | val);
2445 W_REG(sii->osh, &io->dmpstatus, w);
2446 }
2447
2448 reg_read = R_REG(sii->osh, &io->dmpstatus);
2449
2450 /* Point back to original base */
2451 if (BUSTYPE(sih->bustype) == PCI_BUS) {
2452 OSL_PCI_WRITE_CONFIG(nci->osh, PCI_BAR0_WIN,
2453 PCI_ACCESS_SIZE, orig_bar0_win1);
2454 }
2455
2456 return reg_read;
2457 }
2458 }
2459
2460 /* TODO: Used only by host */
2461 int
nci_backplane_access(si_t * sih,uint addr,uint size,uint * val,bool read)2462 nci_backplane_access(si_t *sih, uint addr, uint size, uint *val, bool read)
2463 {
2464 return 0;
2465 }
2466
2467 int
nci_backplane_access_64(si_t * sih,uint addr,uint size,uint64 * val,bool read)2468 nci_backplane_access_64(si_t *sih, uint addr, uint size, uint64 *val, bool read)
2469 {
2470 return 0;
2471 }
2472
2473 uint
nci_num_slaveports(const si_t * sih,uint coreidx)2474 nci_num_slaveports(const si_t *sih, uint coreidx)
2475 {
2476 const si_info_t *sii = SI_INFO(sih);
2477 nci_info_t *nci = sii->nci_info;
2478 const nci_cores_t *core_info = (const nci_cores_t *)&nci->cores[coreidx];
2479 uint32 iface_idx;
2480 uint32 numports = 0;
2481
2482 NCI_TRACE(("nci_get_curmap coreidx %u\n", coreidx));
2483 for (iface_idx = 0; iface_idx < core_info->iface_cnt; iface_idx++) {
2484 NCI_TRACE(("nci_get_curmap iface_idx %u BP_ID %u master %u\n",
2485 iface_idx, ID_BPID(core_info->desc[iface_idx].iface_desc_1),
2486 IS_MASTER(core_info->desc[iface_idx].iface_desc_1)));
2487 /* hack for core idx 8, coreidx without APB Backplane ID */
2488 if (IS_MASTER(core_info->desc[iface_idx].iface_desc_1)) {
2489 continue;
2490 }
2491 if ((ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_APB1) ||
2492 (ID_BPID(core_info->desc[iface_idx].iface_desc_1) == BP_APB2)) {
2493 break;
2494 }
2495 }
2496 if (iface_idx < core_info->iface_cnt) {
2497 numports = core_info->desc[iface_idx].num_addr_reg;
2498 }
2499 return numports;
2500 }
2501
2502 #if defined(BCMDBG) || defined(BCMDBG_DUMP) || defined(BCMDBG_PHYDUMP)
2503 void
nci_dumpregs(const si_t * sih,struct bcmstrbuf * b)2504 nci_dumpregs(const si_t *sih, struct bcmstrbuf *b)
2505 {
2506 const si_info_t *sii = SI_INFO(sih);
2507
2508 bcm_bprintf(b, "ChipNum:%x, ChipRev;%x, BusType:%x, BoardType:%x, BoardVendor:%x\n\n",
2509 sih->chip, sih->chiprev, sih->bustype, sih->boardtype, sih->boardvendor);
2510 BCM_REFERENCE(sii);
2511 /* TODO: Implement dump regs for nci. */
2512 }
2513 #endif /* BCMDBG || BCMDBG_DUMP || BCMDBG_PHYDUMP */
2514
2515 #ifdef BCMDBG
2516 static void
_nci_view(osl_t * osh,aidmp_t * ai,uint32 cid,uint32 addr,bool verbose)2517 _nci_view(osl_t *osh, aidmp_t *ai, uint32 cid, uint32 addr, bool verbose)
2518 {
2519 /* TODO: This is WIP and will be developed once the
2520 * implementation is done based on the NCI.
2521 */
2522 }
2523
2524 void
nci_view(si_t * sih,bool verbose)2525 nci_view(si_t *sih, bool verbose)
2526 {
2527 const si_info_t *sii = SI_INFO(sih);
2528 nci_info_t *nci = sii->nci_info;
2529 const nci_cores_t *core_info = (const nci_cores_t *)nci->cores;
2530 osl_t *osh;
2531 /* TODO: We need to do the structure mapping correctly based on the BOOKER/NIC type */
2532 aidmp_t *ai;
2533 uint32 cid, addr;
2534
2535 ai = sii->curwrap;
2536 osh = sii->osh;
2537
2538 if ((core_info[sii->curidx].coreid) == PMU_CORE_ID) {
2539 SI_ERROR(("Cannot access pmu DMP\n"));
2540 return;
2541 }
2542 cid = core_info[sii->curidx].coreid;
2543 addr = nci_get_nth_wrapper(sih, 0u);
2544 _nci_view(osh, ai, cid, addr, verbose);
2545 }
2546
2547 void
nci_viewall(si_t * sih,bool verbose)2548 nci_viewall(si_t *sih, bool verbose)
2549 {
2550 const si_info_t *sii = SI_INFO(sih);
2551 nci_info_t *nci = sii->nci_info;
2552 const nci_cores_t *core_info = (const nci_cores_t *)nci->cores;
2553 osl_t *osh;
2554 aidmp_t *ai;
2555 uint32 cid, addr;
2556 uint i;
2557
2558 osh = sii->osh;
2559 for (i = 0; i < sii->numcores; i++) {
2560 nci_setcoreidx(sih, i);
2561
2562 if ((core_info[i].coreid) == PMU_CORE_ID) {
2563 SI_ERROR(("Skipping pmu DMP\n"));
2564 continue;
2565 }
2566 ai = sii->curwrap;
2567 cid = core_info[i].coreid;
2568 addr = nci_get_nth_wrapper(sih, 0u);
2569 _nci_view(osh, ai, cid, addr, verbose);
2570 }
2571 }
2572 #endif /* BCMDBG */
2573
2574 uint32
nci_clear_backplane_to(si_t * sih)2575 nci_clear_backplane_to(si_t *sih)
2576 {
2577 /* TODO: This is WIP and will be developed once the
2578 * implementation is done based on the NCI.
2579 */
2580 return 0;
2581 }
2582
2583 #if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
/* Set when a wrapper reset fails, to stop further backplane log accesses */
static bool g_disable_backplane_logs = FALSE;

/* Snapshot of the most recently recorded AXI error; read back via
 * nci_wrapper_get_last_error() and nci_get_axi_timeout_reg().
 */
static uint32 last_axi_error = AXI_WRAP_STS_NONE;
static uint32 last_axi_error_log_status = 0;
static uint32 last_axi_error_core = 0;
static uint32 last_axi_error_wrap = 0;
static uint32 last_axi_errlog_lo = 0;
static uint32 last_axi_errlog_hi = 0;
static uint32 last_axi_errlog_id = 0;

/* slave error is ignored, so account for those cases */
static uint32 si_ignore_errlog_cnt = 0;
2596
/*
 * Decode a logged AXI error and accumulate the matching AXI_WRAP_STS_*
 * bits into *ret.  Only a timeout actually resets the bridge (via
 * nci_reset_axi_to()); slave and decode errors are only reported.
 */
static void
nci_reset_APB(const si_info_t *sii, aidmp_t *ai, int *ret,
	uint32 errlog_status, uint32 errlog_id)
{
	/* only reset APB Bridge on timeout (not slave error, or dec error) */
	switch (errlog_status & AIELS_ERROR_MASK) {
	case AIELS_SLAVE_ERR:
		NCI_PRINT(("AXI slave error\n"));
		*ret |= AXI_WRAP_STS_SLAVE_ERR;
		break;

	case AIELS_TIMEOUT:
		nci_reset_axi_to(sii, ai);
		*ret |= AXI_WRAP_STS_TIMEOUT;
		break;

	case AIELS_DECODE:
		NCI_PRINT(("AXI decode error\n"));
#ifdef USE_HOSTMEM
		/* Ignore known cases of CR4 prefetch abort bugs */
		if ((errlog_id & (BCM_AXI_ID_MASK | BCM_AXI_ACCESS_TYPE_MASK)) !=
			(BCM43xx_AXI_ACCESS_TYPE_PREFETCH | BCM43xx_CR4_AXI_ID))
#endif /* USE_HOSTMEM */
		{
			*ret |= AXI_WRAP_STS_DECODE_ERR;
		}
		break;
	default:
		ASSERT(0);	/* should be impossible */
	}
	if (errlog_status & AIELS_MULTIPLE_ERRORS) {
		NCI_PRINT(("Multiple AXI Errors\n"));
		/* Set multiple errors bit only if actual error is not ignored */
		if (*ret) {
			*ret |= AXI_WRAP_STS_MULTIPLE_ERRORS;
		}
	}
	return;
}
2636 /*
2637 * API to clear the back plane timeout per core.
 * Caller may pass an optional wrapper address. If present this will be used as
2639 * the wrapper base address. If wrapper base address is provided then caller
2640 * must provide the coreid also.
2641 * If both coreid and wrapper is zero, then err status of current bridge
2642 * will be verified.
2643 */
2644
2645 uint32
nci_clear_backplane_to_per_core(si_t * sih,uint coreid,uint coreunit,void * wrap)2646 nci_clear_backplane_to_per_core(si_t *sih, uint coreid, uint coreunit, void *wrap)
2647 {
2648 int ret = AXI_WRAP_STS_NONE;
2649 aidmp_t *ai = NULL;
2650 uint32 errlog_status = 0;
2651 const si_info_t *sii = SI_INFO(sih);
2652 uint32 errlog_lo = 0, errlog_hi = 0, errlog_id = 0, errlog_flags = 0;
2653 uint32 current_coreidx = si_coreidx(sih);
2654 uint32 target_coreidx = nci_findcoreidx(sih, coreid, coreunit);
2655
2656 #if defined(AXI_TIMEOUTS_NIC)
2657 si_axi_error_t * axi_error = sih->err_info ?
2658 &sih->err_info->axi_error[sih->err_info->count] : NULL;
2659 #endif /* AXI_TIMEOUTS_NIC */
2660 bool restore_core = FALSE;
2661
2662 if ((sii->axi_num_wrappers == 0) ||
2663 #ifdef AXI_TIMEOUTS_NIC
2664 (!PCIE(sii)) ||
2665 #endif /* AXI_TIMEOUTS_NIC */
2666 FALSE) {
2667 SI_VMSG(("nci_clear_backplane_to_per_core, axi_num_wrappers:%d, Is_PCIE:%d,"
2668 " BUS_TYPE:%d, ID:%x\n",
2669 sii->axi_num_wrappers, PCIE(sii),
2670 BUSTYPE(sii->pub.bustype), sii->pub.buscoretype));
2671 return AXI_WRAP_STS_NONE;
2672 }
2673
2674 if (wrap != NULL) {
2675 ai = (aidmp_t *)wrap;
2676 } else if (coreid && (target_coreidx != current_coreidx)) {
2677 if (nci_setcoreidx(sih, target_coreidx) == NULL) {
2678 /* Unable to set the core */
2679 NCI_PRINT(("Set Code Failed: coreid:%x, unit:%d, target_coreidx:%d\n",
2680 coreid, coreunit, target_coreidx));
2681 errlog_lo = target_coreidx;
2682 ret = AXI_WRAP_STS_SET_CORE_FAIL;
2683 goto end;
2684 }
2685 restore_core = TRUE;
2686 ai = (aidmp_t *)si_wrapperregs(sih);
2687 } else {
2688 /* Read error status of current wrapper */
2689 ai = (aidmp_t *)si_wrapperregs(sih);
2690
2691 /* Update CoreID to current Code ID */
2692 coreid = nci_coreid(sih, sii->curidx);
2693 }
2694
2695 /* read error log status */
2696 errlog_status = R_REG(sii->osh, &ai->errlogstatus);
2697
2698 if (errlog_status == ID32_INVALID) {
2699 /* Do not try to peek further */
2700 NCI_PRINT(("nci_clear_backplane_to_per_core, errlogstatus:%x - "
2701 "Slave Wrapper:%x\n", errlog_status, coreid));
2702 ret = AXI_WRAP_STS_WRAP_RD_ERR;
2703 errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
2704 goto end;
2705 }
2706
2707 if ((errlog_status & AIELS_ERROR_MASK) != 0) {
2708 uint32 tmp;
2709 uint32 count = 0;
2710 /* set ErrDone to clear the condition */
2711 W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
2712
2713 /* SPINWAIT on errlogstatus timeout status bits */
2714 while ((tmp = R_REG(sii->osh, &ai->errlogstatus)) & AIELS_ERROR_MASK) {
2715
2716 if (tmp == ID32_INVALID) {
2717 NCI_PRINT(("nci_clear_backplane_to_per_core: prev errlogstatus:%x,"
2718 " errlogstatus:%x\n",
2719 errlog_status, tmp));
2720 ret = AXI_WRAP_STS_WRAP_RD_ERR;
2721
2722 errlog_lo = (uint32)(uintptr)&ai->errlogstatus;
2723 goto end;
2724 }
2725 /*
2726 * Clear again, to avoid getting stuck in the loop, if a new error
2727 * is logged after we cleared the first timeout
2728 */
2729 W_REG(sii->osh, &ai->errlogdone, AIELD_ERRDONE_MASK);
2730
2731 count++;
2732 OSL_DELAY(10);
2733 if ((10 * count) > AI_REG_READ_TIMEOUT) {
2734 errlog_status = tmp;
2735 break;
2736 }
2737 }
2738
2739 errlog_lo = R_REG(sii->osh, &ai->errlogaddrlo);
2740 errlog_hi = R_REG(sii->osh, &ai->errlogaddrhi);
2741 errlog_id = R_REG(sii->osh, &ai->errlogid);
2742 errlog_flags = R_REG(sii->osh, &ai->errlogflags);
2743
2744 /* we are already in the error path, so OK to check for the slave error */
2745 if (nci_ignore_errlog(sii, ai, errlog_lo, errlog_hi, errlog_id, errlog_status)) {
2746 si_ignore_errlog_cnt++;
2747 goto end;
2748 }
2749
2750 nci_reset_APB(sii, ai, &ret, errlog_status, errlog_id);
2751
2752 NCI_PRINT(("\tCoreID: %x\n", coreid));
2753 NCI_PRINT(("\t errlog: lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x"
2754 ", status 0x%08x\n",
2755 errlog_lo, errlog_hi, errlog_id, errlog_flags,
2756 errlog_status));
2757 }
2758
2759 end:
2760 if (ret != AXI_WRAP_STS_NONE) {
2761 last_axi_error = ret;
2762 last_axi_error_log_status = errlog_status;
2763 last_axi_error_core = coreid;
2764 last_axi_error_wrap = (uint32)ai;
2765 last_axi_errlog_lo = errlog_lo;
2766 last_axi_errlog_hi = errlog_hi;
2767 last_axi_errlog_id = errlog_id;
2768 }
2769
2770 #if defined(AXI_TIMEOUTS_NIC)
2771 if (axi_error && (ret != AXI_WRAP_STS_NONE)) {
2772 axi_error->error = ret;
2773 axi_error->coreid = coreid;
2774 axi_error->errlog_lo = errlog_lo;
2775 axi_error->errlog_hi = errlog_hi;
2776 axi_error->errlog_id = errlog_id;
2777 axi_error->errlog_flags = errlog_flags;
2778 axi_error->errlog_status = errlog_status;
2779 sih->err_info->count++;
2780
2781 if (sih->err_info->count == SI_MAX_ERRLOG_SIZE) {
2782 sih->err_info->count = SI_MAX_ERRLOG_SIZE - 1;
2783 NCI_PRINT(("AXI Error log overflow\n"));
2784 }
2785 }
2786 #endif /* AXI_TIMEOUTS_NIC */
2787
2788 if (restore_core) {
2789 if (nci_setcoreidx(sih, current_coreidx) == NULL) {
2790 /* Unable to set the core */
2791 return ID32_INVALID;
2792 }
2793 }
2794 return ret;
2795 }
2796
/* TODO: It needs to be handled based on BOOKER/NCI DMP. */
/* reset AXI timeout */
/*
 * Pulse the wrapper's resetctrl to recover from an AXI timeout.  The read
 * after each write forces the posted write to complete before proceeding.
 * If the reset bit will not clear, backplane logging is disabled globally
 * to avoid further wedged accesses.
 */
static void
nci_reset_axi_to(const si_info_t *sii, aidmp_t *ai)
{
	/* reset APB Bridge */
	OR_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
	/* sync write */
	(void)R_REG(sii->osh, &ai->resetctrl);
	/* clear Reset bit */
	AND_REG(sii->osh, &ai->resetctrl, ~(AIRC_RESET));
	/* sync write */
	(void)R_REG(sii->osh, &ai->resetctrl);
	NCI_PRINT(("AXI timeout\n"));
	if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) {
		NCI_PRINT(("reset failed on wrapper %p\n", ai));
		g_disable_backplane_logs = TRUE;
	}
}
2816
/*
 * Copy out the snapshot of the last recorded AXI error.
 * All output pointers must be non-NULL; the values are only meaningful
 * after an error has been logged by nci_clear_backplane_to_per_core().
 * 'sih' is currently unused.
 */
void
nci_wrapper_get_last_error(const si_t *sih, uint32 *error_status, uint32 *core, uint32 *lo,
	uint32 *hi, uint32 *id)
{
	*error_status = last_axi_error_log_status;
	*core = last_axi_error_core;
	*lo = last_axi_errlog_lo;
	*hi = last_axi_errlog_hi;
	*id = last_axi_errlog_id;
}
2827
2828 uint32
nci_get_axi_timeout_reg(void)2829 nci_get_axi_timeout_reg(void)
2830 {
2831 return (GOODREGS(last_axi_errlog_lo) ? last_axi_errlog_lo : 0);
2832 }
2833 #endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
2834
/* TODO: This function should be able to handle NIC as well as BOOKER */
/*
 * Decide whether a logged AXI error is a known, benign case that should be
 * ignored (BT slave errors to the chipcommon SPROM shadow region, or ARM
 * prefetch decode errors).  Returns TRUE when the error can be ignored.
 */
bool
nci_ignore_errlog(const si_info_t *sii, const aidmp_t *ai,
	uint32 lo_addr, uint32 hi_addr, uint32 err_axi_id, uint32 errsts)
{
	uint32 ignore_errsts = AIELS_SLAVE_ERR;
	uint32 ignore_errsts_2 = 0;
	uint32 ignore_hi = BT_CC_SPROM_BADREG_HI;
	uint32 ignore_lo = BT_CC_SPROM_BADREG_LO;
	uint32 ignore_size = BT_CC_SPROM_BADREG_SIZE;
	bool address_check = TRUE;
	uint32 axi_id = 0;
	uint32 axi_id2 = 0;
	bool extd_axi_id_mask = FALSE;
	uint32 axi_id_mask;

	NCI_PRINT(("err check: core %p, error %d, axi id 0x%04x, addr(0x%08x:%08x)\n",
		ai, errsts, err_axi_id, hi_addr, lo_addr));

	/* ignore the BT slave errors if the errlog is to chipcommon addr 0x190 */
	switch (CHIPID(sii->pub.chip)) {
	case BCM4397_CHIP_GRPID: /* TODO: Are these IDs same for 4397 as well? */
#ifdef BTOVERPCIE
		axi_id = BCM4378_BT_AXI_ID;
		/* For BT over PCIE, ignore any slave error from BT. */
		/* No need to check any address range */
		address_check = FALSE;
#endif /* BTOVERPCIE */
		axi_id2 = BCM4378_ARM_PREFETCH_AXI_ID;
		extd_axi_id_mask = TRUE;
		ignore_errsts_2 = AIELS_DECODE;
		break;
	default:
		/* unrecognized chip: never ignore errors */
		return FALSE;
	}

	axi_id_mask = extd_axi_id_mask ? AI_ERRLOGID_AXI_ID_MASK_EXTD : AI_ERRLOGID_AXI_ID_MASK;

	/* AXI ID check */
	err_axi_id &= axi_id_mask;
	errsts &= AIELS_ERROR_MASK;

	/* check the ignore error cases. 2 checks */
	if (!(((err_axi_id == axi_id) && (errsts == ignore_errsts)) ||
		((err_axi_id == axi_id2) && (errsts == ignore_errsts_2)))) {
		/* not the error ignore cases */
		return FALSE;

	}

	/* check the specific address checks now, if specified */
	if (address_check) {
		/* address range check */
		if ((hi_addr != ignore_hi) ||
			(lo_addr < ignore_lo) || (lo_addr >= (ignore_lo + ignore_size))) {
			return FALSE;
		}
	}

	NCI_PRINT(("err check: ignored\n"));
	return TRUE;
}
2897
2898 /* TODO: Check the CORE to AXI ID mapping for 4397 */
2899 uint32
nci_findcoreidx_by_axiid(const si_t * sih,uint32 axiid)2900 nci_findcoreidx_by_axiid(const si_t *sih, uint32 axiid)
2901 {
2902 uint coreid = 0;
2903 uint coreunit = 0;
2904 const nci_axi_to_coreidx_t *axi2coreidx = NULL;
2905 switch (CHIPID(sih->chip)) {
2906 case BCM4397_CHIP_GRPID:
2907 axi2coreidx = axi2coreidx_4397;
2908 break;
2909 default:
2910 NCI_PRINT(("Chipid mapping not found\n"));
2911 break;
2912 }
2913
2914 if (!axi2coreidx) {
2915 return (BADIDX);
2916 }
2917
2918 coreid = axi2coreidx[axiid].coreid;
2919 coreunit = axi2coreidx[axiid].coreunit;
2920
2921 return nci_findcoreidx(sih, coreid, coreunit);
2922 }
2923
/*
 * Query address space 'asidx' of the current core.
 * Stub for the NCI backplane: not yet implemented; *addr and *size are left
 * untouched. Kept so callers written against the AI interface still link.
 */
void nci_coreaddrspaceX(const si_t *sih, uint asidx, uint32 *addr, uint32 *size)
{
	/* Adding just a wrapper. Will implement when required. */
	/* Explicitly discard parameters to silence -Wunused-parameter */
	(void)sih;
	(void)asidx;
	(void)addr;
	(void)size;
}
2928
2929 /*
2930 * this is not declared as static const, although that is the right thing to do
2931 * reason being if declared as static const, compile/link process would that in
2932 * read only section...
2933 * currently this code/array is used to identify the registers which are dumped
2934 * during trap processing
2935 * and usually for the trap buffer, .rodata buffer is reused, so for now just static
2936 */
/* TODO: Should we add another mapping for BOOKER and use the correct one based on the DMP type? */
2938 #ifdef DONGLEBUILD
/* DMP wrapper register offsets captured during trap processing; each offset
 * is emitted to the dump buffer as an address/value pair.
 */
static uint32 BCMPOST_TRAP_RODATA(wrapper_offsets_to_dump)[] = {
	OFFSETOF(aidmp_t, ioctrl),
	OFFSETOF(aidmp_t, iostatus),
	OFFSETOF(aidmp_t, resetctrl),
	OFFSETOF(aidmp_t, resetstatus),
	OFFSETOF(aidmp_t, resetreadid),
	OFFSETOF(aidmp_t, resetwriteid),
	OFFSETOF(aidmp_t, errlogctrl),
	OFFSETOF(aidmp_t, errlogdone),
	OFFSETOF(aidmp_t, errlogstatus),
	OFFSETOF(aidmp_t, errlogaddrlo),
	OFFSETOF(aidmp_t, errlogaddrhi),
	OFFSETOF(aidmp_t, errlogid),
	OFFSETOF(aidmp_t, errloguser),
	OFFSETOF(aidmp_t, errlogflags),
	OFFSETOF(aidmp_t, itipoobaout),
	OFFSETOF(aidmp_t, itipoobbout),
	OFFSETOF(aidmp_t, itipoobcout),
	OFFSETOF(aidmp_t, itipoobdout)
};
2959
2960 static uint32
BCMRAMFN(nci_get_sizeof_wrapper_offsets_to_dump)2961 BCMRAMFN(nci_get_sizeof_wrapper_offsets_to_dump)(void)
2962 {
2963 return (sizeof(wrapper_offsets_to_dump));
2964 }
2965
2966 static uint32
BCMRAMFN(nci_get_wrapper_base_addr)2967 BCMRAMFN(nci_get_wrapper_base_addr)(uint32 **offset)
2968 {
2969 uint32 arr_size = ARRAYSIZE(wrapper_offsets_to_dump);
2970
2971 *offset = &wrapper_offsets_to_dump[0];
2972 return arr_size;
2973 }
2974
2975 #ifdef UART_TRAP_DBG
2976 /* TODO: Is br_wrapba populated for 4397 NCI? */
2977 void
nci_dump_APB_Bridge_registers(const si_t * sih)2978 nci_dump_APB_Bridge_registers(const si_t *sih)
2979 {
2980 aidmp_t *ai;
2981 const si_info_t *sii = SI_INFO(sih);
2982
2983 ai = (aidmp_t *)sii->br_wrapba[0];
2984 printf("APB Bridge 0\n");
2985 printf("lo 0x%08x, hi 0x%08x, id 0x%08x, flags 0x%08x",
2986 R_REG(sii->osh, &ai->errlogaddrlo),
2987 R_REG(sii->osh, &ai->errlogaddrhi),
2988 R_REG(sii->osh, &ai->errlogid),
2989 R_REG(sii->osh, &ai->errlogflags));
2990 printf("\n status 0x%08x\n", R_REG(sii->osh, &ai->errlogstatus));
2991 }
2992 #endif /* UART_TRAP_DBG */
2993
2994 uint32
BCMATTACHFN(nci_wrapper_dump_buf_size)2995 BCMATTACHFN(nci_wrapper_dump_buf_size)(const si_t *sih)
2996 {
2997 uint32 buf_size = 0;
2998 uint32 wrapper_count = 0;
2999 const si_info_t *sii = SI_INFO(sih);
3000
3001 wrapper_count = sii->axi_num_wrappers;
3002 if (wrapper_count == 0) {
3003 return 0;
3004 }
3005
3006 /* cnt indicates how many registers, tag_id 0 will say these are address/value */
3007 /* address/value pairs */
3008 buf_size += 2 * (nci_get_sizeof_wrapper_offsets_to_dump() * wrapper_count);
3009
3010 return buf_size;
3011 }
3012
3013 uint32*
nci_wrapper_dump_binary_one(const si_info_t * sii,uint32 * p32,uint32 wrap_ba)3014 nci_wrapper_dump_binary_one(const si_info_t *sii, uint32 *p32, uint32 wrap_ba)
3015 {
3016 uint i;
3017 uint32 *addr;
3018 uint32 arr_size;
3019 uint32 *offset_base;
3020
3021 arr_size = nci_get_wrapper_base_addr(&offset_base);
3022
3023 for (i = 0; i < arr_size; i++) {
3024 addr = (uint32 *)(wrap_ba + *(offset_base + i));
3025 *p32++ = (uint32)addr;
3026 *p32++ = R_REG(sii->osh, addr);
3027 }
3028 return p32;
3029 }
3030
3031 uint32
nci_wrapper_dump_binary(const si_t * sih,uchar * p)3032 nci_wrapper_dump_binary(const si_t *sih, uchar *p)
3033 {
3034 uint32 *p32 = (uint32 *)p;
3035 uint32 i;
3036 const si_info_t *sii = SI_INFO(sih);
3037
3038 for (i = 0; i < sii->axi_num_wrappers; i++) {
3039 p32 = nci_wrapper_dump_binary_one(sii, p32, sii->axi_wrapper[i].wrapper_addr);
3040 }
3041 return 0;
3042 }
3043
#if defined(ETD)
/*
 * Report the most recent AXI error captured by the timeout machinery.
 * On exit *error, *core and *ba are ALWAYS written (bug fix: previously,
 * when no error was pending in the AXI_TIMEOUTS build, the out-parameters
 * were left untouched and callers could read uninitialized memory).
 * When an error is pending and its wrapper base is known, the wrapper's
 * registers are additionally dumped into 'p'. Always returns 0.
 */
uint32
nci_wrapper_dump_last_timeout(const si_t *sih, uint32 *error, uint32 *core, uint32 *ba, uchar *p)
{
#if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
	uint32 wrap_ba = last_axi_error_wrap;
	const si_info_t *sii = SI_INFO(sih);

	/* Default the outputs so they are valid even when no error is pending */
	*error = AXI_WRAP_STS_NONE;
	*core = 0;
	*ba = 0;

	if (last_axi_error != AXI_WRAP_STS_NONE) {
		if (wrap_ba) {
			uint32 *p32 = (uint32 *)p;
			uint32 *offset_base;
			uint32 arr_size = nci_get_wrapper_base_addr(&offset_base);
			uint i;

			/* Dump each wrapper register value (values only, no addresses) */
			for (i = 0; i < arr_size; i++) {
				uint32 *addr = (uint32 *)(wrap_ba + *(offset_base + i));
				*p32++ = R_REG(sii->osh, addr);
			}
		}
		*error = last_axi_error;
		*core = last_axi_error_core;
		*ba = wrap_ba;
	}
#else
	/* No timeout tracking compiled in: report "no error" */
	*error = 0;
	*core = 0;
	*ba = 0;
#endif /* AXI_TIMEOUTS || AXI_TIMEOUTS_NIC */
	return 0;
}
#endif /* ETD */
3080
3081 bool
nci_check_enable_backplane_log(const si_t * sih)3082 nci_check_enable_backplane_log(const si_t *sih)
3083 {
3084 #if defined (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC)
3085 if (g_disable_backplane_logs) {
3086 return FALSE;
3087 }
3088 else {
3089 return TRUE;
3090 }
3091 #else /* (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
3092 return FALSE;
3093 #endif /* (AXI_TIMEOUTS) || defined (AXI_TIMEOUTS_NIC) */
3094 }
3095 #endif /* DONGLEBUILD */
3096