/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
42*4882a593Smuzhiyun /**
43*4882a593Smuzhiyun * t4_wait_op_done_val - wait until an operation is completed
44*4882a593Smuzhiyun * @adapter: the adapter performing the operation
45*4882a593Smuzhiyun * @reg: the register to check for completion
46*4882a593Smuzhiyun * @mask: a single-bit field within @reg that indicates completion
47*4882a593Smuzhiyun * @polarity: the value of the field when the operation is completed
48*4882a593Smuzhiyun * @attempts: number of check iterations
49*4882a593Smuzhiyun * @delay: delay in usecs between iterations
50*4882a593Smuzhiyun * @valp: where to store the value of the register at completion time
51*4882a593Smuzhiyun *
52*4882a593Smuzhiyun * Wait until an operation is completed by checking a bit in a register
53*4882a593Smuzhiyun * up to @attempts times. If @valp is not NULL the value of the register
54*4882a593Smuzhiyun * at the time it indicated completion is stored there. Returns 0 if the
55*4882a593Smuzhiyun * operation completes and -EAGAIN otherwise.
56*4882a593Smuzhiyun */
t4_wait_op_done_val(struct adapter * adapter,int reg,u32 mask,int polarity,int attempts,int delay,u32 * valp)57*4882a593Smuzhiyun static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
58*4882a593Smuzhiyun int polarity, int attempts, int delay, u32 *valp)
59*4882a593Smuzhiyun {
60*4882a593Smuzhiyun while (1) {
61*4882a593Smuzhiyun u32 val = t4_read_reg(adapter, reg);
62*4882a593Smuzhiyun
63*4882a593Smuzhiyun if (!!(val & mask) == polarity) {
64*4882a593Smuzhiyun if (valp)
65*4882a593Smuzhiyun *valp = val;
66*4882a593Smuzhiyun return 0;
67*4882a593Smuzhiyun }
68*4882a593Smuzhiyun if (--attempts == 0)
69*4882a593Smuzhiyun return -EAGAIN;
70*4882a593Smuzhiyun if (delay)
71*4882a593Smuzhiyun udelay(delay);
72*4882a593Smuzhiyun }
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun
/* Poll a register for completion without capturing its final value.
 * See t4_wait_op_done_val() for the parameter semantics.
 */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun /**
83*4882a593Smuzhiyun * t4_set_reg_field - set a register field to a value
84*4882a593Smuzhiyun * @adapter: the adapter to program
85*4882a593Smuzhiyun * @addr: the register address
86*4882a593Smuzhiyun * @mask: specifies the portion of the register to modify
87*4882a593Smuzhiyun * @val: the new value for the register field
88*4882a593Smuzhiyun *
89*4882a593Smuzhiyun * Sets a register field specified by the supplied mask to the
90*4882a593Smuzhiyun * given value.
91*4882a593Smuzhiyun */
t4_set_reg_field(struct adapter * adapter,unsigned int addr,u32 mask,u32 val)92*4882a593Smuzhiyun void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
93*4882a593Smuzhiyun u32 val)
94*4882a593Smuzhiyun {
95*4882a593Smuzhiyun u32 v = t4_read_reg(adapter, addr) & ~mask;
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun t4_write_reg(adapter, addr, v | val);
98*4882a593Smuzhiyun (void) t4_read_reg(adapter, addr); /* flush */
99*4882a593Smuzhiyun }
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun /**
102*4882a593Smuzhiyun * t4_read_indirect - read indirectly addressed registers
103*4882a593Smuzhiyun * @adap: the adapter
104*4882a593Smuzhiyun * @addr_reg: register holding the indirect address
105*4882a593Smuzhiyun * @data_reg: register holding the value of the indirect register
106*4882a593Smuzhiyun * @vals: where the read register values are stored
107*4882a593Smuzhiyun * @nregs: how many indirect registers to read
108*4882a593Smuzhiyun * @start_idx: index of first indirect register to read
109*4882a593Smuzhiyun *
110*4882a593Smuzhiyun * Reads registers that are accessed indirectly through an address/data
111*4882a593Smuzhiyun * register pair.
112*4882a593Smuzhiyun */
t4_read_indirect(struct adapter * adap,unsigned int addr_reg,unsigned int data_reg,u32 * vals,unsigned int nregs,unsigned int start_idx)113*4882a593Smuzhiyun void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
114*4882a593Smuzhiyun unsigned int data_reg, u32 *vals,
115*4882a593Smuzhiyun unsigned int nregs, unsigned int start_idx)
116*4882a593Smuzhiyun {
117*4882a593Smuzhiyun while (nregs--) {
118*4882a593Smuzhiyun t4_write_reg(adap, addr_reg, start_idx);
119*4882a593Smuzhiyun *vals++ = t4_read_reg(adap, data_reg);
120*4882a593Smuzhiyun start_idx++;
121*4882a593Smuzhiyun }
122*4882a593Smuzhiyun }
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun /**
125*4882a593Smuzhiyun * t4_write_indirect - write indirectly addressed registers
126*4882a593Smuzhiyun * @adap: the adapter
127*4882a593Smuzhiyun * @addr_reg: register holding the indirect addresses
128*4882a593Smuzhiyun * @data_reg: register holding the value for the indirect registers
129*4882a593Smuzhiyun * @vals: values to write
130*4882a593Smuzhiyun * @nregs: how many indirect registers to write
131*4882a593Smuzhiyun * @start_idx: address of first indirect register to write
132*4882a593Smuzhiyun *
133*4882a593Smuzhiyun * Writes a sequential block of registers that are accessed indirectly
134*4882a593Smuzhiyun * through an address/data register pair.
135*4882a593Smuzhiyun */
t4_write_indirect(struct adapter * adap,unsigned int addr_reg,unsigned int data_reg,const u32 * vals,unsigned int nregs,unsigned int start_idx)136*4882a593Smuzhiyun void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137*4882a593Smuzhiyun unsigned int data_reg, const u32 *vals,
138*4882a593Smuzhiyun unsigned int nregs, unsigned int start_idx)
139*4882a593Smuzhiyun {
140*4882a593Smuzhiyun while (nregs--) {
141*4882a593Smuzhiyun t4_write_reg(adap, addr_reg, start_idx++);
142*4882a593Smuzhiyun t4_write_reg(adap, data_reg, *vals++);
143*4882a593Smuzhiyun }
144*4882a593Smuzhiyun }
145*4882a593Smuzhiyun
146*4882a593Smuzhiyun /*
147*4882a593Smuzhiyun * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
148*4882a593Smuzhiyun * mechanism. This guarantees that we get the real value even if we're
149*4882a593Smuzhiyun * operating within a Virtual Machine and the Hypervisor is trapping our
150*4882a593Smuzhiyun * Configuration Space accesses.
151*4882a593Smuzhiyun */
t4_hw_pci_read_cfg4(struct adapter * adap,int reg,u32 * val)152*4882a593Smuzhiyun void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
153*4882a593Smuzhiyun {
154*4882a593Smuzhiyun u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
157*4882a593Smuzhiyun req |= ENABLE_F;
158*4882a593Smuzhiyun else
159*4882a593Smuzhiyun req |= T6_ENABLE_F;
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun if (is_t4(adap->params.chip))
162*4882a593Smuzhiyun req |= LOCALCFG_F;
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
165*4882a593Smuzhiyun *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
166*4882a593Smuzhiyun
167*4882a593Smuzhiyun /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
168*4882a593Smuzhiyun * Configuration Space read. (None of the other fields matter when
169*4882a593Smuzhiyun * ENABLE is 0 so a simple register write is easier than a
170*4882a593Smuzhiyun * read-modify-write via t4_set_reg_field().)
171*4882a593Smuzhiyun */
172*4882a593Smuzhiyun t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
173*4882a593Smuzhiyun }
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun /*
176*4882a593Smuzhiyun * t4_report_fw_error - report firmware error
177*4882a593Smuzhiyun * @adap: the adapter
178*4882a593Smuzhiyun *
179*4882a593Smuzhiyun * The adapter firmware can indicate error conditions to the host.
180*4882a593Smuzhiyun * If the firmware has indicated an error, print out the reason for
181*4882a593Smuzhiyun * the firmware error.
182*4882a593Smuzhiyun */
t4_report_fw_error(struct adapter * adap)183*4882a593Smuzhiyun static void t4_report_fw_error(struct adapter *adap)
184*4882a593Smuzhiyun {
185*4882a593Smuzhiyun static const char *const reason[] = {
186*4882a593Smuzhiyun "Crash", /* PCIE_FW_EVAL_CRASH */
187*4882a593Smuzhiyun "During Device Preparation", /* PCIE_FW_EVAL_PREP */
188*4882a593Smuzhiyun "During Device Configuration", /* PCIE_FW_EVAL_CONF */
189*4882a593Smuzhiyun "During Device Initialization", /* PCIE_FW_EVAL_INIT */
190*4882a593Smuzhiyun "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
191*4882a593Smuzhiyun "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
192*4882a593Smuzhiyun "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
193*4882a593Smuzhiyun "Reserved", /* reserved */
194*4882a593Smuzhiyun };
195*4882a593Smuzhiyun u32 pcie_fw;
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun pcie_fw = t4_read_reg(adap, PCIE_FW_A);
198*4882a593Smuzhiyun if (pcie_fw & PCIE_FW_ERR_F) {
199*4882a593Smuzhiyun dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
200*4882a593Smuzhiyun reason[PCIE_FW_EVAL_G(pcie_fw)]);
201*4882a593Smuzhiyun adap->flags &= ~CXGB4_FW_OK;
202*4882a593Smuzhiyun }
203*4882a593Smuzhiyun }
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun /*
206*4882a593Smuzhiyun * Get the reply to a mailbox command and store it in @rpl in big-endian order.
207*4882a593Smuzhiyun */
get_mbox_rpl(struct adapter * adap,__be64 * rpl,int nflit,u32 mbox_addr)208*4882a593Smuzhiyun static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
209*4882a593Smuzhiyun u32 mbox_addr)
210*4882a593Smuzhiyun {
211*4882a593Smuzhiyun for ( ; nflit; nflit--, mbox_addr += 8)
212*4882a593Smuzhiyun *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
213*4882a593Smuzhiyun }
214*4882a593Smuzhiyun
215*4882a593Smuzhiyun /*
216*4882a593Smuzhiyun * Handle a FW assertion reported in a mailbox.
217*4882a593Smuzhiyun */
fw_asrt(struct adapter * adap,u32 mbox_addr)218*4882a593Smuzhiyun static void fw_asrt(struct adapter *adap, u32 mbox_addr)
219*4882a593Smuzhiyun {
220*4882a593Smuzhiyun struct fw_debug_cmd asrt;
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
223*4882a593Smuzhiyun dev_alert(adap->pdev_dev,
224*4882a593Smuzhiyun "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
225*4882a593Smuzhiyun asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
226*4882a593Smuzhiyun be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
227*4882a593Smuzhiyun }
228*4882a593Smuzhiyun
229*4882a593Smuzhiyun /**
230*4882a593Smuzhiyun * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
231*4882a593Smuzhiyun * @adapter: the adapter
232*4882a593Smuzhiyun * @cmd: the Firmware Mailbox Command or Reply
233*4882a593Smuzhiyun * @size: command length in bytes
234*4882a593Smuzhiyun * @access: the time (ms) needed to access the Firmware Mailbox
235*4882a593Smuzhiyun * @execute: the time (ms) the command spent being executed
236*4882a593Smuzhiyun */
t4_record_mbox(struct adapter * adapter,const __be64 * cmd,unsigned int size,int access,int execute)237*4882a593Smuzhiyun static void t4_record_mbox(struct adapter *adapter,
238*4882a593Smuzhiyun const __be64 *cmd, unsigned int size,
239*4882a593Smuzhiyun int access, int execute)
240*4882a593Smuzhiyun {
241*4882a593Smuzhiyun struct mbox_cmd_log *log = adapter->mbox_log;
242*4882a593Smuzhiyun struct mbox_cmd *entry;
243*4882a593Smuzhiyun int i;
244*4882a593Smuzhiyun
245*4882a593Smuzhiyun entry = mbox_cmd_log_entry(log, log->cursor++);
246*4882a593Smuzhiyun if (log->cursor == log->size)
247*4882a593Smuzhiyun log->cursor = 0;
248*4882a593Smuzhiyun
249*4882a593Smuzhiyun for (i = 0; i < size / 8; i++)
250*4882a593Smuzhiyun entry->cmd[i] = be64_to_cpu(cmd[i]);
251*4882a593Smuzhiyun while (i < MBOX_LEN / 8)
252*4882a593Smuzhiyun entry->cmd[i++] = 0;
253*4882a593Smuzhiyun entry->timestamp = jiffies;
254*4882a593Smuzhiyun entry->seqno = log->seqno++;
255*4882a593Smuzhiyun entry->access = access;
256*4882a593Smuzhiyun entry->execute = execute;
257*4882a593Smuzhiyun }
258*4882a593Smuzhiyun
/**
 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 * @timeout: time to wait for command to finish before timing out
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command. If @rpl is not %NULL it is used to
 * store the FW's reply to the command. The command and its optional
 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
 * to respond. @sleep_ok determines whether we may sleep while awaiting
 * the response. If sleeping is allowed we use progressive backoff
 * otherwise we spin.
 *
 * The return value is 0 on success or a negative errno on failure. A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error. In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/* Backoff schedule in milliseconds; the last entry repeats. */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	struct mbox_list entry;
	u16 access = 0;		/* ms spent gaining mailbox access (logged) */
	u16 execute = 0;	/* ms the command spent executing (logged) */
	u32 v;
	u64 res;
	int i, ms, delay_idx, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
	__be64 cmd_rpl[MBOX_LEN / 8];
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	/* If we have a negative timeout, that implies that we can't sleep. */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/* Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	spin_lock_bh(&adap->mbox_lock);
	list_add_tail(&entry.list, &adap->mlist.list);
	spin_unlock_bh(&adap->mbox_lock);

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; ; i += ms) {
		/* If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...
		 */
		pcie_fw = t4_read_reg(adap, PCIE_FW_A);
		if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
			/* Remove ourselves from the wait list before failing. */
			spin_lock_bh(&adap->mbox_lock);
			list_del(&entry.list);
			spin_unlock_bh(&adap->mbox_lock);
			ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
			t4_record_mbox(adap, cmd, size, access, ret);
			return ret;
		}

		/* If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (list_first_entry(&adap->mlist.list, struct mbox_list,
				     list) == &entry)
			break;

		/* Delay for a bit before checking again ... */
		if (sleep_ok) {
			ms = delay[delay_idx];	/* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}
	}

	/* Loop trying to get ownership of the mailbox.  Return an error
	 * if we can't gain ownership.
	 */
	v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
	if (v != MBOX_OWNER_DRV) {
		spin_lock_bh(&adap->mbox_lock);
		list_del(&entry.list);
		spin_unlock_bh(&adap->mbox_lock);
		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
		t4_record_mbox(adap, cmd, size, access, ret);
		return ret;
	}

	/* Copy in the new mailbox command and send it on its way ... */
	t4_record_mbox(adap, cmd, size, access, 0);
	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	/* Hand the mailbox over to the firmware. */
	t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	delay_idx = 0;
	ms = delay[0];

	/* Poll for the reply until @timeout ms elapse or the firmware
	 * reports a fatal error in PCIE_FW.
	 */
	for (i = 0;
	     !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
	     i < timeout;
	     i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];	/* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
			/* Ownership returned without a message: re-arm and
			 * keep waiting.
			 */
			if (!(v & MBMSGVALID_F)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
			res = be64_to_cpu(cmd_rpl[0]);

			/* A FW_DEBUG_CMD reply is a firmware assertion, not
			 * a reply to our command; log it and report -EIO.
			 */
			if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL_V(EIO);
			} else if (rpl) {
				memcpy(rpl, cmd_rpl, size);
			}

			t4_write_reg(adap, ctl_reg, 0);

			execute = i + ms;
			t4_record_mbox(adap, cmd_rpl,
				       MBOX_LEN, access, execute);
			spin_lock_bh(&adap->mbox_lock);
			list_del(&entry.list);
			spin_unlock_bh(&adap->mbox_lock);
			/* Return the (negated) retval field from the reply. */
			return -FW_CMD_RETVAL_G((int)res);
		}
	}

	/* Timed out or the firmware reported an error: log, report, and
	 * declare the adapter dead.
	 */
	ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
	t4_record_mbox(adap, cmd, size, access, ret);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	t4_report_fw_error(adap);
	spin_lock_bh(&adap->mbox_lock);
	list_del(&entry.list);
	spin_unlock_bh(&adap->mbox_lock);
	t4_fatal_err(adap);
	return ret;
}
441*4882a593Smuzhiyun
/* Send a mailbox command using the default firmware command timeout;
 * see t4_wr_mbox_meat_timeout() for details.
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
				       FW_CMD_MAX_TIMEOUT);
}
448*4882a593Smuzhiyun
t4_edc_err_read(struct adapter * adap,int idx)449*4882a593Smuzhiyun static int t4_edc_err_read(struct adapter *adap, int idx)
450*4882a593Smuzhiyun {
451*4882a593Smuzhiyun u32 edc_ecc_err_addr_reg;
452*4882a593Smuzhiyun u32 rdata_reg;
453*4882a593Smuzhiyun
454*4882a593Smuzhiyun if (is_t4(adap->params.chip)) {
455*4882a593Smuzhiyun CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
456*4882a593Smuzhiyun return 0;
457*4882a593Smuzhiyun }
458*4882a593Smuzhiyun if (idx != 0 && idx != 1) {
459*4882a593Smuzhiyun CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
460*4882a593Smuzhiyun return 0;
461*4882a593Smuzhiyun }
462*4882a593Smuzhiyun
463*4882a593Smuzhiyun edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
464*4882a593Smuzhiyun rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
465*4882a593Smuzhiyun
466*4882a593Smuzhiyun CH_WARN(adap,
467*4882a593Smuzhiyun "edc%d err addr 0x%x: 0x%x.\n",
468*4882a593Smuzhiyun idx, edc_ecc_err_addr_reg,
469*4882a593Smuzhiyun t4_read_reg(adap, edc_ecc_err_addr_reg));
470*4882a593Smuzhiyun CH_WARN(adap,
471*4882a593Smuzhiyun "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
472*4882a593Smuzhiyun rdata_reg,
473*4882a593Smuzhiyun (unsigned long long)t4_read_reg64(adap, rdata_reg),
474*4882a593Smuzhiyun (unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
475*4882a593Smuzhiyun (unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
476*4882a593Smuzhiyun (unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
477*4882a593Smuzhiyun (unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
478*4882a593Smuzhiyun (unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
479*4882a593Smuzhiyun (unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
480*4882a593Smuzhiyun (unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
481*4882a593Smuzhiyun (unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
482*4882a593Smuzhiyun
483*4882a593Smuzhiyun return 0;
484*4882a593Smuzhiyun }
485*4882a593Smuzhiyun
486*4882a593Smuzhiyun /**
487*4882a593Smuzhiyun * t4_memory_rw_init - Get memory window relative offset, base, and size.
488*4882a593Smuzhiyun * @adap: the adapter
489*4882a593Smuzhiyun * @win: PCI-E Memory Window to use
490*4882a593Smuzhiyun * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_HMA or MEM_MC
491*4882a593Smuzhiyun * @mem_off: memory relative offset with respect to @mtype.
492*4882a593Smuzhiyun * @mem_base: configured memory base address.
493*4882a593Smuzhiyun * @mem_aperture: configured memory window aperture.
494*4882a593Smuzhiyun *
495*4882a593Smuzhiyun * Get the configured memory window's relative offset, base, and size.
496*4882a593Smuzhiyun */
t4_memory_rw_init(struct adapter * adap,int win,int mtype,u32 * mem_off,u32 * mem_base,u32 * mem_aperture)497*4882a593Smuzhiyun int t4_memory_rw_init(struct adapter *adap, int win, int mtype, u32 *mem_off,
498*4882a593Smuzhiyun u32 *mem_base, u32 *mem_aperture)
499*4882a593Smuzhiyun {
500*4882a593Smuzhiyun u32 edc_size, mc_size, mem_reg;
501*4882a593Smuzhiyun
502*4882a593Smuzhiyun /* Offset into the region of memory which is being accessed
503*4882a593Smuzhiyun * MEM_EDC0 = 0
504*4882a593Smuzhiyun * MEM_EDC1 = 1
505*4882a593Smuzhiyun * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
506*4882a593Smuzhiyun * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
507*4882a593Smuzhiyun * MEM_HMA = 4
508*4882a593Smuzhiyun */
509*4882a593Smuzhiyun edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
510*4882a593Smuzhiyun if (mtype == MEM_HMA) {
511*4882a593Smuzhiyun *mem_off = 2 * (edc_size * 1024 * 1024);
512*4882a593Smuzhiyun } else if (mtype != MEM_MC1) {
513*4882a593Smuzhiyun *mem_off = (mtype * (edc_size * 1024 * 1024));
514*4882a593Smuzhiyun } else {
515*4882a593Smuzhiyun mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
516*4882a593Smuzhiyun MA_EXT_MEMORY0_BAR_A));
517*4882a593Smuzhiyun *mem_off = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
518*4882a593Smuzhiyun }
519*4882a593Smuzhiyun
520*4882a593Smuzhiyun /* Each PCI-E Memory Window is programmed with a window size -- or
521*4882a593Smuzhiyun * "aperture" -- which controls the granularity of its mapping onto
522*4882a593Smuzhiyun * adapter memory. We need to grab that aperture in order to know
523*4882a593Smuzhiyun * how to use the specified window. The window is also programmed
524*4882a593Smuzhiyun * with the base address of the Memory Window in BAR0's address
525*4882a593Smuzhiyun * space. For T4 this is an absolute PCI-E Bus Address. For T5
526*4882a593Smuzhiyun * the address is relative to BAR0.
527*4882a593Smuzhiyun */
528*4882a593Smuzhiyun mem_reg = t4_read_reg(adap,
529*4882a593Smuzhiyun PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
530*4882a593Smuzhiyun win));
531*4882a593Smuzhiyun /* a dead adapter will return 0xffffffff for PIO reads */
532*4882a593Smuzhiyun if (mem_reg == 0xffffffff)
533*4882a593Smuzhiyun return -ENXIO;
534*4882a593Smuzhiyun
535*4882a593Smuzhiyun *mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
536*4882a593Smuzhiyun *mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
537*4882a593Smuzhiyun if (is_t4(adap->params.chip))
538*4882a593Smuzhiyun *mem_base -= adap->t4_bar0;
539*4882a593Smuzhiyun
540*4882a593Smuzhiyun return 0;
541*4882a593Smuzhiyun }
542*4882a593Smuzhiyun
543*4882a593Smuzhiyun /**
544*4882a593Smuzhiyun * t4_memory_update_win - Move memory window to specified address.
545*4882a593Smuzhiyun * @adap: the adapter
546*4882a593Smuzhiyun * @win: PCI-E Memory Window to use
547*4882a593Smuzhiyun * @addr: location to move.
548*4882a593Smuzhiyun *
549*4882a593Smuzhiyun * Move memory window to specified address.
550*4882a593Smuzhiyun */
t4_memory_update_win(struct adapter * adap,int win,u32 addr)551*4882a593Smuzhiyun void t4_memory_update_win(struct adapter *adap, int win, u32 addr)
552*4882a593Smuzhiyun {
553*4882a593Smuzhiyun t4_write_reg(adap,
554*4882a593Smuzhiyun PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
555*4882a593Smuzhiyun addr);
556*4882a593Smuzhiyun /* Read it back to ensure that changes propagate before we
557*4882a593Smuzhiyun * attempt to use the new value.
558*4882a593Smuzhiyun */
559*4882a593Smuzhiyun t4_read_reg(adap,
560*4882a593Smuzhiyun PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
561*4882a593Smuzhiyun }
562*4882a593Smuzhiyun
/**
 * t4_memory_rw_residual - Read/Write residual data.
 * @adap: the adapter
 * @off: relative offset within residual to start read/write.
 * @addr: address within indicated memory type.
 * @buf: host memory buffer
 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 * Read/Write residual data less than 32-bits.  The underlying memory is
 * only 32-bit accessible, so the partial word is handled by splitting a
 * full 32-bit register access into bytes via the union below.
 */
void t4_memory_rw_residual(struct adapter *adap, u32 off, u32 addr, u8 *buf,
			   int dir)
{
	/* Staging area: lets us view the last 32-bit word byte-by-byte. */
	union {
		u32 word;
		char byte[4];
	} last;
	unsigned char *bp;
	int i;

	if (dir == T4_MEMORY_READ) {
		/* Fetch the containing word (little-endian on the bus) and
		 * copy bytes [off, 4) into the caller's buffer.
		 * NOTE(review): bytes below @off in @buf are left untouched —
		 * presumably already filled by the bulk word copy in the
		 * caller; confirm against t4_memory_rw() (its tail is outside
		 * this view).
		 */
		last.word = le32_to_cpu((__force __le32)
					t4_read_reg(adap, addr));
		for (bp = (unsigned char *)buf, i = off; i < 4; i++)
			bp[i] = last.byte[i];
	} else {
		/* NOTE(review): @buf is a u8 pointer, so "*buf" loads only a
		 * single byte into the 32-bit word (bytes 1..3 start as 0),
		 * making the zeroing loop below largely redundant.  Verify
		 * this matches the caller's intent — the bulk path transfers
		 * whole u32 words — before relying on multi-byte residual
		 * writes.
		 */
		last.word = *buf;
		/* Zero the bytes at and above @off so they don't clobber
		 * adapter memory beyond the residual.
		 */
		for (i = off; i < 4; i++)
			last.byte[i] = 0;
		t4_write_reg(adap, addr,
			     (__force u32)cpu_to_le32(last.word));
	}
}
596*4882a593Smuzhiyun
597*4882a593Smuzhiyun /**
598*4882a593Smuzhiyun * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
599*4882a593Smuzhiyun * @adap: the adapter
600*4882a593Smuzhiyun * @win: PCI-E Memory Window to use
601*4882a593Smuzhiyun * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
602*4882a593Smuzhiyun * @addr: address within indicated memory type
603*4882a593Smuzhiyun * @len: amount of memory to transfer
604*4882a593Smuzhiyun * @hbuf: host memory buffer
605*4882a593Smuzhiyun * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
606*4882a593Smuzhiyun *
607*4882a593Smuzhiyun * Reads/writes an [almost] arbitrary memory region in the firmware: the
608*4882a593Smuzhiyun * firmware memory address and host buffer must be aligned on 32-bit
609*4882a593Smuzhiyun * boundaries; the length may be arbitrary. The memory is transferred as
610*4882a593Smuzhiyun * a raw byte sequence from/to the firmware's memory. If this memory
611*4882a593Smuzhiyun * contains data structures which contain multi-byte integers, it's the
612*4882a593Smuzhiyun * caller's responsibility to perform appropriate byte order conversions.
613*4882a593Smuzhiyun */
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
		 u32 len, void *hbuf, int dir)
{
	u32 pos, offset, resid, memoffset;
	u32 win_pf, mem_aperture, mem_base;
	u32 *buf;
	int ret;

	/* Argument sanity checks ... the adapter memory address and the
	 * host buffer must both be 32-bit aligned (the length may be
	 * arbitrary and is handled below).
	 */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;
	buf = (u32 *)hbuf;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware. So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Resolve the memory type into the window base, the offset of that
	 * memory type within the adapter address space, and the window
	 * aperture size.
	 */
	ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
				&mem_aperture);
	if (ret)
		return ret;

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/* T4 window offsets carry no PF qualifier; on T5+ the PF number is
	 * encoded into the window position value.
	 */
	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.  (The mask arithmetic assumes mem_aperture is a
	 * power of two -- presumably guaranteed by t4_memory_rw_init;
	 * TODO confirm.)
	 */
	pos = addr & ~(mem_aperture - 1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.
	 */
	t4_memory_update_win(adap, win, pos | win_pf);

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 *
	 * A note on Endianness issues:
	 *
	 * The "register" reads and writes below from/to the PCI-E Memory
	 * Window invoke the standard adapter Big-Endian to PCI-E Link
	 * Little-Endian "swizzle." As a result, if we have the following
	 * data in adapter memory:
	 *
	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
	 *     Address:      i+0  i+1  i+2  i+3
	 *
	 * Then a read of the adapter memory via the PCI-E Memory Window
	 * will yield:
	 *
	 *     x = readl(i)
	 *     31                  0
	 *     [ b3 | b2 | b1 | b0 ]
	 *
	 * If this value is stored into local memory on a Little-Endian system
	 * it will show up correctly in local memory as:
	 *
	 *     ( ..., b0, b1, b2, b3, ... )
	 *
	 * But on a Big-Endian system, the store will show up in memory
	 * incorrectly swizzled as:
	 *
	 *     ( ..., b3, b2, b1, b0, ... )
	 *
	 * So we need to account for this in the reads and writes to the
	 * PCI-E Memory Window below by undoing the register read/write
	 * swizzles.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
						mem_base + offset));
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(*buf++));
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next. Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_memory_update_win(adap, win, pos | win_pf);
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount. The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid)
		t4_memory_rw_residual(adap, resid, mem_base + offset,
				      (u8 *)buf, dir);

	return 0;
}
725*4882a593Smuzhiyun
726*4882a593Smuzhiyun /* Return the specified PCI-E Configuration Space register from our Physical
727*4882a593Smuzhiyun * Function. We try first via a Firmware LDST Command since we prefer to let
728*4882a593Smuzhiyun * the firmware own all of these registers, but if that fails we go for it
729*4882a593Smuzhiyun * directly ourselves.
730*4882a593Smuzhiyun */
t4_read_pcie_cfg4(struct adapter * adap,int reg)731*4882a593Smuzhiyun u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
732*4882a593Smuzhiyun {
733*4882a593Smuzhiyun u32 val, ldst_addrspace;
734*4882a593Smuzhiyun
735*4882a593Smuzhiyun /* If fw_attach != 0, construct and send the Firmware LDST Command to
736*4882a593Smuzhiyun * retrieve the specified PCI-E Configuration Space register.
737*4882a593Smuzhiyun */
738*4882a593Smuzhiyun struct fw_ldst_cmd ldst_cmd;
739*4882a593Smuzhiyun int ret;
740*4882a593Smuzhiyun
741*4882a593Smuzhiyun memset(&ldst_cmd, 0, sizeof(ldst_cmd));
742*4882a593Smuzhiyun ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
743*4882a593Smuzhiyun ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
744*4882a593Smuzhiyun FW_CMD_REQUEST_F |
745*4882a593Smuzhiyun FW_CMD_READ_F |
746*4882a593Smuzhiyun ldst_addrspace);
747*4882a593Smuzhiyun ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
748*4882a593Smuzhiyun ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
749*4882a593Smuzhiyun ldst_cmd.u.pcie.ctrl_to_fn =
750*4882a593Smuzhiyun (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
751*4882a593Smuzhiyun ldst_cmd.u.pcie.r = reg;
752*4882a593Smuzhiyun
753*4882a593Smuzhiyun /* If the LDST Command succeeds, return the result, otherwise
754*4882a593Smuzhiyun * fall through to reading it directly ourselves ...
755*4882a593Smuzhiyun */
756*4882a593Smuzhiyun ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
757*4882a593Smuzhiyun &ldst_cmd);
758*4882a593Smuzhiyun if (ret == 0)
759*4882a593Smuzhiyun val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
760*4882a593Smuzhiyun else
761*4882a593Smuzhiyun /* Read the desired Configuration Space register via the PCI-E
762*4882a593Smuzhiyun * Backdoor mechanism.
763*4882a593Smuzhiyun */
764*4882a593Smuzhiyun t4_hw_pci_read_cfg4(adap, reg, &val);
765*4882a593Smuzhiyun return val;
766*4882a593Smuzhiyun }
767*4882a593Smuzhiyun
768*4882a593Smuzhiyun /* Get the window based on base passed to it.
769*4882a593Smuzhiyun * Window aperture is currently unhandled, but there is no use case for it
770*4882a593Smuzhiyun * right now
771*4882a593Smuzhiyun */
static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
			 u32 memwin_base)
{
	u32 bar0;

	/* For T5 and later, the window registers take an offset relative
	 * to the PCI-E BAR, so the memory window base can be used as-is.
	 */
	if (!is_t4(adap->params.chip))
		return memwin_base;

	/* T4 window decoders need the absolute bus address.  Truncation
	 * intentional: we only read the bottom 32-bits of the 64-bit
	 * BAR0/BAR1 ...  We use the hardware backdoor mechanism to read
	 * BAR0 instead of using pci_resource_start() because we could be
	 * operating from within a Virtual Machine which is trapping our
	 * accesses to our Configuration Space, and we need to set up the
	 * PCI-E Memory Window decoders with the actual addresses which
	 * will be coming across the PCI-E link.
	 */
	bar0 = t4_read_pcie_cfg4(adap, pci_base);
	bar0 &= pci_mask;
	adap->t4_bar0 = bar0;

	return bar0 + memwin_base;
}
800*4882a593Smuzhiyun
801*4882a593Smuzhiyun /* Get the default utility window (win0) used by everyone */
t4_get_util_window(struct adapter * adap)802*4882a593Smuzhiyun u32 t4_get_util_window(struct adapter *adap)
803*4882a593Smuzhiyun {
804*4882a593Smuzhiyun return t4_get_window(adap, PCI_BASE_ADDRESS_0,
805*4882a593Smuzhiyun PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
806*4882a593Smuzhiyun }
807*4882a593Smuzhiyun
808*4882a593Smuzhiyun /* Set up memory window for accessing adapter memory ranges. (Read
809*4882a593Smuzhiyun * back MA register to ensure that changes propagate before we attempt
810*4882a593Smuzhiyun * to use the new values.)
811*4882a593Smuzhiyun */
void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
{
	u32 win_reg = PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window);

	/* Program the window's base address and aperture size. */
	t4_write_reg(adap, win_reg,
		     memwin_base | BIR_V(0) |
		     WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));

	/* Read the MA register back so the change is guaranteed to have
	 * propagated before we attempt to use the new values.
	 */
	t4_read_reg(adap, win_reg);
}
821*4882a593Smuzhiyun
822*4882a593Smuzhiyun /**
823*4882a593Smuzhiyun * t4_get_regs_len - return the size of the chips register set
824*4882a593Smuzhiyun * @adapter: the adapter
825*4882a593Smuzhiyun *
826*4882a593Smuzhiyun * Returns the size of the chip's BAR0 register space.
827*4882a593Smuzhiyun */
t4_get_regs_len(struct adapter * adapter)828*4882a593Smuzhiyun unsigned int t4_get_regs_len(struct adapter *adapter)
829*4882a593Smuzhiyun {
830*4882a593Smuzhiyun unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
831*4882a593Smuzhiyun
832*4882a593Smuzhiyun switch (chip_version) {
833*4882a593Smuzhiyun case CHELSIO_T4:
834*4882a593Smuzhiyun return T4_REGMAP_SIZE;
835*4882a593Smuzhiyun
836*4882a593Smuzhiyun case CHELSIO_T5:
837*4882a593Smuzhiyun case CHELSIO_T6:
838*4882a593Smuzhiyun return T5_REGMAP_SIZE;
839*4882a593Smuzhiyun }
840*4882a593Smuzhiyun
841*4882a593Smuzhiyun dev_err(adapter->pdev_dev,
842*4882a593Smuzhiyun "Unsupported chip version %d\n", chip_version);
843*4882a593Smuzhiyun return 0;
844*4882a593Smuzhiyun }
845*4882a593Smuzhiyun
846*4882a593Smuzhiyun /**
847*4882a593Smuzhiyun * t4_get_regs - read chip registers into provided buffer
848*4882a593Smuzhiyun * @adap: the adapter
849*4882a593Smuzhiyun * @buf: register buffer
850*4882a593Smuzhiyun * @buf_size: size (in bytes) of register buffer
851*4882a593Smuzhiyun *
852*4882a593Smuzhiyun * If the provided register buffer isn't large enough for the chip's
853*4882a593Smuzhiyun * full register range, the register dump will be truncated to the
854*4882a593Smuzhiyun * register buffer's size.
855*4882a593Smuzhiyun */
t4_get_regs(struct adapter * adap,void * buf,size_t buf_size)856*4882a593Smuzhiyun void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
857*4882a593Smuzhiyun {
858*4882a593Smuzhiyun static const unsigned int t4_reg_ranges[] = {
859*4882a593Smuzhiyun 0x1008, 0x1108,
860*4882a593Smuzhiyun 0x1180, 0x1184,
861*4882a593Smuzhiyun 0x1190, 0x1194,
862*4882a593Smuzhiyun 0x11a0, 0x11a4,
863*4882a593Smuzhiyun 0x11b0, 0x11b4,
864*4882a593Smuzhiyun 0x11fc, 0x123c,
865*4882a593Smuzhiyun 0x1300, 0x173c,
866*4882a593Smuzhiyun 0x1800, 0x18fc,
867*4882a593Smuzhiyun 0x3000, 0x30d8,
868*4882a593Smuzhiyun 0x30e0, 0x30e4,
869*4882a593Smuzhiyun 0x30ec, 0x5910,
870*4882a593Smuzhiyun 0x5920, 0x5924,
871*4882a593Smuzhiyun 0x5960, 0x5960,
872*4882a593Smuzhiyun 0x5968, 0x5968,
873*4882a593Smuzhiyun 0x5970, 0x5970,
874*4882a593Smuzhiyun 0x5978, 0x5978,
875*4882a593Smuzhiyun 0x5980, 0x5980,
876*4882a593Smuzhiyun 0x5988, 0x5988,
877*4882a593Smuzhiyun 0x5990, 0x5990,
878*4882a593Smuzhiyun 0x5998, 0x5998,
879*4882a593Smuzhiyun 0x59a0, 0x59d4,
880*4882a593Smuzhiyun 0x5a00, 0x5ae0,
881*4882a593Smuzhiyun 0x5ae8, 0x5ae8,
882*4882a593Smuzhiyun 0x5af0, 0x5af0,
883*4882a593Smuzhiyun 0x5af8, 0x5af8,
884*4882a593Smuzhiyun 0x6000, 0x6098,
885*4882a593Smuzhiyun 0x6100, 0x6150,
886*4882a593Smuzhiyun 0x6200, 0x6208,
887*4882a593Smuzhiyun 0x6240, 0x6248,
888*4882a593Smuzhiyun 0x6280, 0x62b0,
889*4882a593Smuzhiyun 0x62c0, 0x6338,
890*4882a593Smuzhiyun 0x6370, 0x638c,
891*4882a593Smuzhiyun 0x6400, 0x643c,
892*4882a593Smuzhiyun 0x6500, 0x6524,
893*4882a593Smuzhiyun 0x6a00, 0x6a04,
894*4882a593Smuzhiyun 0x6a14, 0x6a38,
895*4882a593Smuzhiyun 0x6a60, 0x6a70,
896*4882a593Smuzhiyun 0x6a78, 0x6a78,
897*4882a593Smuzhiyun 0x6b00, 0x6b0c,
898*4882a593Smuzhiyun 0x6b1c, 0x6b84,
899*4882a593Smuzhiyun 0x6bf0, 0x6bf8,
900*4882a593Smuzhiyun 0x6c00, 0x6c0c,
901*4882a593Smuzhiyun 0x6c1c, 0x6c84,
902*4882a593Smuzhiyun 0x6cf0, 0x6cf8,
903*4882a593Smuzhiyun 0x6d00, 0x6d0c,
904*4882a593Smuzhiyun 0x6d1c, 0x6d84,
905*4882a593Smuzhiyun 0x6df0, 0x6df8,
906*4882a593Smuzhiyun 0x6e00, 0x6e0c,
907*4882a593Smuzhiyun 0x6e1c, 0x6e84,
908*4882a593Smuzhiyun 0x6ef0, 0x6ef8,
909*4882a593Smuzhiyun 0x6f00, 0x6f0c,
910*4882a593Smuzhiyun 0x6f1c, 0x6f84,
911*4882a593Smuzhiyun 0x6ff0, 0x6ff8,
912*4882a593Smuzhiyun 0x7000, 0x700c,
913*4882a593Smuzhiyun 0x701c, 0x7084,
914*4882a593Smuzhiyun 0x70f0, 0x70f8,
915*4882a593Smuzhiyun 0x7100, 0x710c,
916*4882a593Smuzhiyun 0x711c, 0x7184,
917*4882a593Smuzhiyun 0x71f0, 0x71f8,
918*4882a593Smuzhiyun 0x7200, 0x720c,
919*4882a593Smuzhiyun 0x721c, 0x7284,
920*4882a593Smuzhiyun 0x72f0, 0x72f8,
921*4882a593Smuzhiyun 0x7300, 0x730c,
922*4882a593Smuzhiyun 0x731c, 0x7384,
923*4882a593Smuzhiyun 0x73f0, 0x73f8,
924*4882a593Smuzhiyun 0x7400, 0x7450,
925*4882a593Smuzhiyun 0x7500, 0x7530,
926*4882a593Smuzhiyun 0x7600, 0x760c,
927*4882a593Smuzhiyun 0x7614, 0x761c,
928*4882a593Smuzhiyun 0x7680, 0x76cc,
929*4882a593Smuzhiyun 0x7700, 0x7798,
930*4882a593Smuzhiyun 0x77c0, 0x77fc,
931*4882a593Smuzhiyun 0x7900, 0x79fc,
932*4882a593Smuzhiyun 0x7b00, 0x7b58,
933*4882a593Smuzhiyun 0x7b60, 0x7b84,
934*4882a593Smuzhiyun 0x7b8c, 0x7c38,
935*4882a593Smuzhiyun 0x7d00, 0x7d38,
936*4882a593Smuzhiyun 0x7d40, 0x7d80,
937*4882a593Smuzhiyun 0x7d8c, 0x7ddc,
938*4882a593Smuzhiyun 0x7de4, 0x7e04,
939*4882a593Smuzhiyun 0x7e10, 0x7e1c,
940*4882a593Smuzhiyun 0x7e24, 0x7e38,
941*4882a593Smuzhiyun 0x7e40, 0x7e44,
942*4882a593Smuzhiyun 0x7e4c, 0x7e78,
943*4882a593Smuzhiyun 0x7e80, 0x7ea4,
944*4882a593Smuzhiyun 0x7eac, 0x7edc,
945*4882a593Smuzhiyun 0x7ee8, 0x7efc,
946*4882a593Smuzhiyun 0x8dc0, 0x8e04,
947*4882a593Smuzhiyun 0x8e10, 0x8e1c,
948*4882a593Smuzhiyun 0x8e30, 0x8e78,
949*4882a593Smuzhiyun 0x8ea0, 0x8eb8,
950*4882a593Smuzhiyun 0x8ec0, 0x8f6c,
951*4882a593Smuzhiyun 0x8fc0, 0x9008,
952*4882a593Smuzhiyun 0x9010, 0x9058,
953*4882a593Smuzhiyun 0x9060, 0x9060,
954*4882a593Smuzhiyun 0x9068, 0x9074,
955*4882a593Smuzhiyun 0x90fc, 0x90fc,
956*4882a593Smuzhiyun 0x9400, 0x9408,
957*4882a593Smuzhiyun 0x9410, 0x9458,
958*4882a593Smuzhiyun 0x9600, 0x9600,
959*4882a593Smuzhiyun 0x9608, 0x9638,
960*4882a593Smuzhiyun 0x9640, 0x96bc,
961*4882a593Smuzhiyun 0x9800, 0x9808,
962*4882a593Smuzhiyun 0x9820, 0x983c,
963*4882a593Smuzhiyun 0x9850, 0x9864,
964*4882a593Smuzhiyun 0x9c00, 0x9c6c,
965*4882a593Smuzhiyun 0x9c80, 0x9cec,
966*4882a593Smuzhiyun 0x9d00, 0x9d6c,
967*4882a593Smuzhiyun 0x9d80, 0x9dec,
968*4882a593Smuzhiyun 0x9e00, 0x9e6c,
969*4882a593Smuzhiyun 0x9e80, 0x9eec,
970*4882a593Smuzhiyun 0x9f00, 0x9f6c,
971*4882a593Smuzhiyun 0x9f80, 0x9fec,
972*4882a593Smuzhiyun 0xd004, 0xd004,
973*4882a593Smuzhiyun 0xd010, 0xd03c,
974*4882a593Smuzhiyun 0xdfc0, 0xdfe0,
975*4882a593Smuzhiyun 0xe000, 0xea7c,
976*4882a593Smuzhiyun 0xf000, 0x11110,
977*4882a593Smuzhiyun 0x11118, 0x11190,
978*4882a593Smuzhiyun 0x19040, 0x1906c,
979*4882a593Smuzhiyun 0x19078, 0x19080,
980*4882a593Smuzhiyun 0x1908c, 0x190e4,
981*4882a593Smuzhiyun 0x190f0, 0x190f8,
982*4882a593Smuzhiyun 0x19100, 0x19110,
983*4882a593Smuzhiyun 0x19120, 0x19124,
984*4882a593Smuzhiyun 0x19150, 0x19194,
985*4882a593Smuzhiyun 0x1919c, 0x191b0,
986*4882a593Smuzhiyun 0x191d0, 0x191e8,
987*4882a593Smuzhiyun 0x19238, 0x1924c,
988*4882a593Smuzhiyun 0x193f8, 0x1943c,
989*4882a593Smuzhiyun 0x1944c, 0x19474,
990*4882a593Smuzhiyun 0x19490, 0x194e0,
991*4882a593Smuzhiyun 0x194f0, 0x194f8,
992*4882a593Smuzhiyun 0x19800, 0x19c08,
993*4882a593Smuzhiyun 0x19c10, 0x19c90,
994*4882a593Smuzhiyun 0x19ca0, 0x19ce4,
995*4882a593Smuzhiyun 0x19cf0, 0x19d40,
996*4882a593Smuzhiyun 0x19d50, 0x19d94,
997*4882a593Smuzhiyun 0x19da0, 0x19de8,
998*4882a593Smuzhiyun 0x19df0, 0x19e40,
999*4882a593Smuzhiyun 0x19e50, 0x19e90,
1000*4882a593Smuzhiyun 0x19ea0, 0x19f4c,
1001*4882a593Smuzhiyun 0x1a000, 0x1a004,
1002*4882a593Smuzhiyun 0x1a010, 0x1a06c,
1003*4882a593Smuzhiyun 0x1a0b0, 0x1a0e4,
1004*4882a593Smuzhiyun 0x1a0ec, 0x1a0f4,
1005*4882a593Smuzhiyun 0x1a100, 0x1a108,
1006*4882a593Smuzhiyun 0x1a114, 0x1a120,
1007*4882a593Smuzhiyun 0x1a128, 0x1a130,
1008*4882a593Smuzhiyun 0x1a138, 0x1a138,
1009*4882a593Smuzhiyun 0x1a190, 0x1a1c4,
1010*4882a593Smuzhiyun 0x1a1fc, 0x1a1fc,
1011*4882a593Smuzhiyun 0x1e040, 0x1e04c,
1012*4882a593Smuzhiyun 0x1e284, 0x1e28c,
1013*4882a593Smuzhiyun 0x1e2c0, 0x1e2c0,
1014*4882a593Smuzhiyun 0x1e2e0, 0x1e2e0,
1015*4882a593Smuzhiyun 0x1e300, 0x1e384,
1016*4882a593Smuzhiyun 0x1e3c0, 0x1e3c8,
1017*4882a593Smuzhiyun 0x1e440, 0x1e44c,
1018*4882a593Smuzhiyun 0x1e684, 0x1e68c,
1019*4882a593Smuzhiyun 0x1e6c0, 0x1e6c0,
1020*4882a593Smuzhiyun 0x1e6e0, 0x1e6e0,
1021*4882a593Smuzhiyun 0x1e700, 0x1e784,
1022*4882a593Smuzhiyun 0x1e7c0, 0x1e7c8,
1023*4882a593Smuzhiyun 0x1e840, 0x1e84c,
1024*4882a593Smuzhiyun 0x1ea84, 0x1ea8c,
1025*4882a593Smuzhiyun 0x1eac0, 0x1eac0,
1026*4882a593Smuzhiyun 0x1eae0, 0x1eae0,
1027*4882a593Smuzhiyun 0x1eb00, 0x1eb84,
1028*4882a593Smuzhiyun 0x1ebc0, 0x1ebc8,
1029*4882a593Smuzhiyun 0x1ec40, 0x1ec4c,
1030*4882a593Smuzhiyun 0x1ee84, 0x1ee8c,
1031*4882a593Smuzhiyun 0x1eec0, 0x1eec0,
1032*4882a593Smuzhiyun 0x1eee0, 0x1eee0,
1033*4882a593Smuzhiyun 0x1ef00, 0x1ef84,
1034*4882a593Smuzhiyun 0x1efc0, 0x1efc8,
1035*4882a593Smuzhiyun 0x1f040, 0x1f04c,
1036*4882a593Smuzhiyun 0x1f284, 0x1f28c,
1037*4882a593Smuzhiyun 0x1f2c0, 0x1f2c0,
1038*4882a593Smuzhiyun 0x1f2e0, 0x1f2e0,
1039*4882a593Smuzhiyun 0x1f300, 0x1f384,
1040*4882a593Smuzhiyun 0x1f3c0, 0x1f3c8,
1041*4882a593Smuzhiyun 0x1f440, 0x1f44c,
1042*4882a593Smuzhiyun 0x1f684, 0x1f68c,
1043*4882a593Smuzhiyun 0x1f6c0, 0x1f6c0,
1044*4882a593Smuzhiyun 0x1f6e0, 0x1f6e0,
1045*4882a593Smuzhiyun 0x1f700, 0x1f784,
1046*4882a593Smuzhiyun 0x1f7c0, 0x1f7c8,
1047*4882a593Smuzhiyun 0x1f840, 0x1f84c,
1048*4882a593Smuzhiyun 0x1fa84, 0x1fa8c,
1049*4882a593Smuzhiyun 0x1fac0, 0x1fac0,
1050*4882a593Smuzhiyun 0x1fae0, 0x1fae0,
1051*4882a593Smuzhiyun 0x1fb00, 0x1fb84,
1052*4882a593Smuzhiyun 0x1fbc0, 0x1fbc8,
1053*4882a593Smuzhiyun 0x1fc40, 0x1fc4c,
1054*4882a593Smuzhiyun 0x1fe84, 0x1fe8c,
1055*4882a593Smuzhiyun 0x1fec0, 0x1fec0,
1056*4882a593Smuzhiyun 0x1fee0, 0x1fee0,
1057*4882a593Smuzhiyun 0x1ff00, 0x1ff84,
1058*4882a593Smuzhiyun 0x1ffc0, 0x1ffc8,
1059*4882a593Smuzhiyun 0x20000, 0x2002c,
1060*4882a593Smuzhiyun 0x20100, 0x2013c,
1061*4882a593Smuzhiyun 0x20190, 0x201a0,
1062*4882a593Smuzhiyun 0x201a8, 0x201b8,
1063*4882a593Smuzhiyun 0x201c4, 0x201c8,
1064*4882a593Smuzhiyun 0x20200, 0x20318,
1065*4882a593Smuzhiyun 0x20400, 0x204b4,
1066*4882a593Smuzhiyun 0x204c0, 0x20528,
1067*4882a593Smuzhiyun 0x20540, 0x20614,
1068*4882a593Smuzhiyun 0x21000, 0x21040,
1069*4882a593Smuzhiyun 0x2104c, 0x21060,
1070*4882a593Smuzhiyun 0x210c0, 0x210ec,
1071*4882a593Smuzhiyun 0x21200, 0x21268,
1072*4882a593Smuzhiyun 0x21270, 0x21284,
1073*4882a593Smuzhiyun 0x212fc, 0x21388,
1074*4882a593Smuzhiyun 0x21400, 0x21404,
1075*4882a593Smuzhiyun 0x21500, 0x21500,
1076*4882a593Smuzhiyun 0x21510, 0x21518,
1077*4882a593Smuzhiyun 0x2152c, 0x21530,
1078*4882a593Smuzhiyun 0x2153c, 0x2153c,
1079*4882a593Smuzhiyun 0x21550, 0x21554,
1080*4882a593Smuzhiyun 0x21600, 0x21600,
1081*4882a593Smuzhiyun 0x21608, 0x2161c,
1082*4882a593Smuzhiyun 0x21624, 0x21628,
1083*4882a593Smuzhiyun 0x21630, 0x21634,
1084*4882a593Smuzhiyun 0x2163c, 0x2163c,
1085*4882a593Smuzhiyun 0x21700, 0x2171c,
1086*4882a593Smuzhiyun 0x21780, 0x2178c,
1087*4882a593Smuzhiyun 0x21800, 0x21818,
1088*4882a593Smuzhiyun 0x21820, 0x21828,
1089*4882a593Smuzhiyun 0x21830, 0x21848,
1090*4882a593Smuzhiyun 0x21850, 0x21854,
1091*4882a593Smuzhiyun 0x21860, 0x21868,
1092*4882a593Smuzhiyun 0x21870, 0x21870,
1093*4882a593Smuzhiyun 0x21878, 0x21898,
1094*4882a593Smuzhiyun 0x218a0, 0x218a8,
1095*4882a593Smuzhiyun 0x218b0, 0x218c8,
1096*4882a593Smuzhiyun 0x218d0, 0x218d4,
1097*4882a593Smuzhiyun 0x218e0, 0x218e8,
1098*4882a593Smuzhiyun 0x218f0, 0x218f0,
1099*4882a593Smuzhiyun 0x218f8, 0x21a18,
1100*4882a593Smuzhiyun 0x21a20, 0x21a28,
1101*4882a593Smuzhiyun 0x21a30, 0x21a48,
1102*4882a593Smuzhiyun 0x21a50, 0x21a54,
1103*4882a593Smuzhiyun 0x21a60, 0x21a68,
1104*4882a593Smuzhiyun 0x21a70, 0x21a70,
1105*4882a593Smuzhiyun 0x21a78, 0x21a98,
1106*4882a593Smuzhiyun 0x21aa0, 0x21aa8,
1107*4882a593Smuzhiyun 0x21ab0, 0x21ac8,
1108*4882a593Smuzhiyun 0x21ad0, 0x21ad4,
1109*4882a593Smuzhiyun 0x21ae0, 0x21ae8,
1110*4882a593Smuzhiyun 0x21af0, 0x21af0,
1111*4882a593Smuzhiyun 0x21af8, 0x21c18,
1112*4882a593Smuzhiyun 0x21c20, 0x21c20,
1113*4882a593Smuzhiyun 0x21c28, 0x21c30,
1114*4882a593Smuzhiyun 0x21c38, 0x21c38,
1115*4882a593Smuzhiyun 0x21c80, 0x21c98,
1116*4882a593Smuzhiyun 0x21ca0, 0x21ca8,
1117*4882a593Smuzhiyun 0x21cb0, 0x21cc8,
1118*4882a593Smuzhiyun 0x21cd0, 0x21cd4,
1119*4882a593Smuzhiyun 0x21ce0, 0x21ce8,
1120*4882a593Smuzhiyun 0x21cf0, 0x21cf0,
1121*4882a593Smuzhiyun 0x21cf8, 0x21d7c,
1122*4882a593Smuzhiyun 0x21e00, 0x21e04,
1123*4882a593Smuzhiyun 0x22000, 0x2202c,
1124*4882a593Smuzhiyun 0x22100, 0x2213c,
1125*4882a593Smuzhiyun 0x22190, 0x221a0,
1126*4882a593Smuzhiyun 0x221a8, 0x221b8,
1127*4882a593Smuzhiyun 0x221c4, 0x221c8,
1128*4882a593Smuzhiyun 0x22200, 0x22318,
1129*4882a593Smuzhiyun 0x22400, 0x224b4,
1130*4882a593Smuzhiyun 0x224c0, 0x22528,
1131*4882a593Smuzhiyun 0x22540, 0x22614,
1132*4882a593Smuzhiyun 0x23000, 0x23040,
1133*4882a593Smuzhiyun 0x2304c, 0x23060,
1134*4882a593Smuzhiyun 0x230c0, 0x230ec,
1135*4882a593Smuzhiyun 0x23200, 0x23268,
1136*4882a593Smuzhiyun 0x23270, 0x23284,
1137*4882a593Smuzhiyun 0x232fc, 0x23388,
1138*4882a593Smuzhiyun 0x23400, 0x23404,
1139*4882a593Smuzhiyun 0x23500, 0x23500,
1140*4882a593Smuzhiyun 0x23510, 0x23518,
1141*4882a593Smuzhiyun 0x2352c, 0x23530,
1142*4882a593Smuzhiyun 0x2353c, 0x2353c,
1143*4882a593Smuzhiyun 0x23550, 0x23554,
1144*4882a593Smuzhiyun 0x23600, 0x23600,
1145*4882a593Smuzhiyun 0x23608, 0x2361c,
1146*4882a593Smuzhiyun 0x23624, 0x23628,
1147*4882a593Smuzhiyun 0x23630, 0x23634,
1148*4882a593Smuzhiyun 0x2363c, 0x2363c,
1149*4882a593Smuzhiyun 0x23700, 0x2371c,
1150*4882a593Smuzhiyun 0x23780, 0x2378c,
1151*4882a593Smuzhiyun 0x23800, 0x23818,
1152*4882a593Smuzhiyun 0x23820, 0x23828,
1153*4882a593Smuzhiyun 0x23830, 0x23848,
1154*4882a593Smuzhiyun 0x23850, 0x23854,
1155*4882a593Smuzhiyun 0x23860, 0x23868,
1156*4882a593Smuzhiyun 0x23870, 0x23870,
1157*4882a593Smuzhiyun 0x23878, 0x23898,
1158*4882a593Smuzhiyun 0x238a0, 0x238a8,
1159*4882a593Smuzhiyun 0x238b0, 0x238c8,
1160*4882a593Smuzhiyun 0x238d0, 0x238d4,
1161*4882a593Smuzhiyun 0x238e0, 0x238e8,
1162*4882a593Smuzhiyun 0x238f0, 0x238f0,
1163*4882a593Smuzhiyun 0x238f8, 0x23a18,
1164*4882a593Smuzhiyun 0x23a20, 0x23a28,
1165*4882a593Smuzhiyun 0x23a30, 0x23a48,
1166*4882a593Smuzhiyun 0x23a50, 0x23a54,
1167*4882a593Smuzhiyun 0x23a60, 0x23a68,
1168*4882a593Smuzhiyun 0x23a70, 0x23a70,
1169*4882a593Smuzhiyun 0x23a78, 0x23a98,
1170*4882a593Smuzhiyun 0x23aa0, 0x23aa8,
1171*4882a593Smuzhiyun 0x23ab0, 0x23ac8,
1172*4882a593Smuzhiyun 0x23ad0, 0x23ad4,
1173*4882a593Smuzhiyun 0x23ae0, 0x23ae8,
1174*4882a593Smuzhiyun 0x23af0, 0x23af0,
1175*4882a593Smuzhiyun 0x23af8, 0x23c18,
1176*4882a593Smuzhiyun 0x23c20, 0x23c20,
1177*4882a593Smuzhiyun 0x23c28, 0x23c30,
1178*4882a593Smuzhiyun 0x23c38, 0x23c38,
1179*4882a593Smuzhiyun 0x23c80, 0x23c98,
1180*4882a593Smuzhiyun 0x23ca0, 0x23ca8,
1181*4882a593Smuzhiyun 0x23cb0, 0x23cc8,
1182*4882a593Smuzhiyun 0x23cd0, 0x23cd4,
1183*4882a593Smuzhiyun 0x23ce0, 0x23ce8,
1184*4882a593Smuzhiyun 0x23cf0, 0x23cf0,
1185*4882a593Smuzhiyun 0x23cf8, 0x23d7c,
1186*4882a593Smuzhiyun 0x23e00, 0x23e04,
1187*4882a593Smuzhiyun 0x24000, 0x2402c,
1188*4882a593Smuzhiyun 0x24100, 0x2413c,
1189*4882a593Smuzhiyun 0x24190, 0x241a0,
1190*4882a593Smuzhiyun 0x241a8, 0x241b8,
1191*4882a593Smuzhiyun 0x241c4, 0x241c8,
1192*4882a593Smuzhiyun 0x24200, 0x24318,
1193*4882a593Smuzhiyun 0x24400, 0x244b4,
1194*4882a593Smuzhiyun 0x244c0, 0x24528,
1195*4882a593Smuzhiyun 0x24540, 0x24614,
1196*4882a593Smuzhiyun 0x25000, 0x25040,
1197*4882a593Smuzhiyun 0x2504c, 0x25060,
1198*4882a593Smuzhiyun 0x250c0, 0x250ec,
1199*4882a593Smuzhiyun 0x25200, 0x25268,
1200*4882a593Smuzhiyun 0x25270, 0x25284,
1201*4882a593Smuzhiyun 0x252fc, 0x25388,
1202*4882a593Smuzhiyun 0x25400, 0x25404,
1203*4882a593Smuzhiyun 0x25500, 0x25500,
1204*4882a593Smuzhiyun 0x25510, 0x25518,
1205*4882a593Smuzhiyun 0x2552c, 0x25530,
1206*4882a593Smuzhiyun 0x2553c, 0x2553c,
1207*4882a593Smuzhiyun 0x25550, 0x25554,
1208*4882a593Smuzhiyun 0x25600, 0x25600,
1209*4882a593Smuzhiyun 0x25608, 0x2561c,
1210*4882a593Smuzhiyun 0x25624, 0x25628,
1211*4882a593Smuzhiyun 0x25630, 0x25634,
1212*4882a593Smuzhiyun 0x2563c, 0x2563c,
1213*4882a593Smuzhiyun 0x25700, 0x2571c,
1214*4882a593Smuzhiyun 0x25780, 0x2578c,
1215*4882a593Smuzhiyun 0x25800, 0x25818,
1216*4882a593Smuzhiyun 0x25820, 0x25828,
1217*4882a593Smuzhiyun 0x25830, 0x25848,
1218*4882a593Smuzhiyun 0x25850, 0x25854,
1219*4882a593Smuzhiyun 0x25860, 0x25868,
1220*4882a593Smuzhiyun 0x25870, 0x25870,
1221*4882a593Smuzhiyun 0x25878, 0x25898,
1222*4882a593Smuzhiyun 0x258a0, 0x258a8,
1223*4882a593Smuzhiyun 0x258b0, 0x258c8,
1224*4882a593Smuzhiyun 0x258d0, 0x258d4,
1225*4882a593Smuzhiyun 0x258e0, 0x258e8,
1226*4882a593Smuzhiyun 0x258f0, 0x258f0,
1227*4882a593Smuzhiyun 0x258f8, 0x25a18,
1228*4882a593Smuzhiyun 0x25a20, 0x25a28,
1229*4882a593Smuzhiyun 0x25a30, 0x25a48,
1230*4882a593Smuzhiyun 0x25a50, 0x25a54,
1231*4882a593Smuzhiyun 0x25a60, 0x25a68,
1232*4882a593Smuzhiyun 0x25a70, 0x25a70,
1233*4882a593Smuzhiyun 0x25a78, 0x25a98,
1234*4882a593Smuzhiyun 0x25aa0, 0x25aa8,
1235*4882a593Smuzhiyun 0x25ab0, 0x25ac8,
1236*4882a593Smuzhiyun 0x25ad0, 0x25ad4,
1237*4882a593Smuzhiyun 0x25ae0, 0x25ae8,
1238*4882a593Smuzhiyun 0x25af0, 0x25af0,
1239*4882a593Smuzhiyun 0x25af8, 0x25c18,
1240*4882a593Smuzhiyun 0x25c20, 0x25c20,
1241*4882a593Smuzhiyun 0x25c28, 0x25c30,
1242*4882a593Smuzhiyun 0x25c38, 0x25c38,
1243*4882a593Smuzhiyun 0x25c80, 0x25c98,
1244*4882a593Smuzhiyun 0x25ca0, 0x25ca8,
1245*4882a593Smuzhiyun 0x25cb0, 0x25cc8,
1246*4882a593Smuzhiyun 0x25cd0, 0x25cd4,
1247*4882a593Smuzhiyun 0x25ce0, 0x25ce8,
1248*4882a593Smuzhiyun 0x25cf0, 0x25cf0,
1249*4882a593Smuzhiyun 0x25cf8, 0x25d7c,
1250*4882a593Smuzhiyun 0x25e00, 0x25e04,
1251*4882a593Smuzhiyun 0x26000, 0x2602c,
1252*4882a593Smuzhiyun 0x26100, 0x2613c,
1253*4882a593Smuzhiyun 0x26190, 0x261a0,
1254*4882a593Smuzhiyun 0x261a8, 0x261b8,
1255*4882a593Smuzhiyun 0x261c4, 0x261c8,
1256*4882a593Smuzhiyun 0x26200, 0x26318,
1257*4882a593Smuzhiyun 0x26400, 0x264b4,
1258*4882a593Smuzhiyun 0x264c0, 0x26528,
1259*4882a593Smuzhiyun 0x26540, 0x26614,
1260*4882a593Smuzhiyun 0x27000, 0x27040,
1261*4882a593Smuzhiyun 0x2704c, 0x27060,
1262*4882a593Smuzhiyun 0x270c0, 0x270ec,
1263*4882a593Smuzhiyun 0x27200, 0x27268,
1264*4882a593Smuzhiyun 0x27270, 0x27284,
1265*4882a593Smuzhiyun 0x272fc, 0x27388,
1266*4882a593Smuzhiyun 0x27400, 0x27404,
1267*4882a593Smuzhiyun 0x27500, 0x27500,
1268*4882a593Smuzhiyun 0x27510, 0x27518,
1269*4882a593Smuzhiyun 0x2752c, 0x27530,
1270*4882a593Smuzhiyun 0x2753c, 0x2753c,
1271*4882a593Smuzhiyun 0x27550, 0x27554,
1272*4882a593Smuzhiyun 0x27600, 0x27600,
1273*4882a593Smuzhiyun 0x27608, 0x2761c,
1274*4882a593Smuzhiyun 0x27624, 0x27628,
1275*4882a593Smuzhiyun 0x27630, 0x27634,
1276*4882a593Smuzhiyun 0x2763c, 0x2763c,
1277*4882a593Smuzhiyun 0x27700, 0x2771c,
1278*4882a593Smuzhiyun 0x27780, 0x2778c,
1279*4882a593Smuzhiyun 0x27800, 0x27818,
1280*4882a593Smuzhiyun 0x27820, 0x27828,
1281*4882a593Smuzhiyun 0x27830, 0x27848,
1282*4882a593Smuzhiyun 0x27850, 0x27854,
1283*4882a593Smuzhiyun 0x27860, 0x27868,
1284*4882a593Smuzhiyun 0x27870, 0x27870,
1285*4882a593Smuzhiyun 0x27878, 0x27898,
1286*4882a593Smuzhiyun 0x278a0, 0x278a8,
1287*4882a593Smuzhiyun 0x278b0, 0x278c8,
1288*4882a593Smuzhiyun 0x278d0, 0x278d4,
1289*4882a593Smuzhiyun 0x278e0, 0x278e8,
1290*4882a593Smuzhiyun 0x278f0, 0x278f0,
1291*4882a593Smuzhiyun 0x278f8, 0x27a18,
1292*4882a593Smuzhiyun 0x27a20, 0x27a28,
1293*4882a593Smuzhiyun 0x27a30, 0x27a48,
1294*4882a593Smuzhiyun 0x27a50, 0x27a54,
1295*4882a593Smuzhiyun 0x27a60, 0x27a68,
1296*4882a593Smuzhiyun 0x27a70, 0x27a70,
1297*4882a593Smuzhiyun 0x27a78, 0x27a98,
1298*4882a593Smuzhiyun 0x27aa0, 0x27aa8,
1299*4882a593Smuzhiyun 0x27ab0, 0x27ac8,
1300*4882a593Smuzhiyun 0x27ad0, 0x27ad4,
1301*4882a593Smuzhiyun 0x27ae0, 0x27ae8,
1302*4882a593Smuzhiyun 0x27af0, 0x27af0,
1303*4882a593Smuzhiyun 0x27af8, 0x27c18,
1304*4882a593Smuzhiyun 0x27c20, 0x27c20,
1305*4882a593Smuzhiyun 0x27c28, 0x27c30,
1306*4882a593Smuzhiyun 0x27c38, 0x27c38,
1307*4882a593Smuzhiyun 0x27c80, 0x27c98,
1308*4882a593Smuzhiyun 0x27ca0, 0x27ca8,
1309*4882a593Smuzhiyun 0x27cb0, 0x27cc8,
1310*4882a593Smuzhiyun 0x27cd0, 0x27cd4,
1311*4882a593Smuzhiyun 0x27ce0, 0x27ce8,
1312*4882a593Smuzhiyun 0x27cf0, 0x27cf0,
1313*4882a593Smuzhiyun 0x27cf8, 0x27d7c,
1314*4882a593Smuzhiyun 0x27e00, 0x27e04,
1315*4882a593Smuzhiyun };
1316*4882a593Smuzhiyun
1317*4882a593Smuzhiyun static const unsigned int t5_reg_ranges[] = {
1318*4882a593Smuzhiyun 0x1008, 0x10c0,
1319*4882a593Smuzhiyun 0x10cc, 0x10f8,
1320*4882a593Smuzhiyun 0x1100, 0x1100,
1321*4882a593Smuzhiyun 0x110c, 0x1148,
1322*4882a593Smuzhiyun 0x1180, 0x1184,
1323*4882a593Smuzhiyun 0x1190, 0x1194,
1324*4882a593Smuzhiyun 0x11a0, 0x11a4,
1325*4882a593Smuzhiyun 0x11b0, 0x11b4,
1326*4882a593Smuzhiyun 0x11fc, 0x123c,
1327*4882a593Smuzhiyun 0x1280, 0x173c,
1328*4882a593Smuzhiyun 0x1800, 0x18fc,
1329*4882a593Smuzhiyun 0x3000, 0x3028,
1330*4882a593Smuzhiyun 0x3060, 0x30b0,
1331*4882a593Smuzhiyun 0x30b8, 0x30d8,
1332*4882a593Smuzhiyun 0x30e0, 0x30fc,
1333*4882a593Smuzhiyun 0x3140, 0x357c,
1334*4882a593Smuzhiyun 0x35a8, 0x35cc,
1335*4882a593Smuzhiyun 0x35ec, 0x35ec,
1336*4882a593Smuzhiyun 0x3600, 0x5624,
1337*4882a593Smuzhiyun 0x56cc, 0x56ec,
1338*4882a593Smuzhiyun 0x56f4, 0x5720,
1339*4882a593Smuzhiyun 0x5728, 0x575c,
1340*4882a593Smuzhiyun 0x580c, 0x5814,
1341*4882a593Smuzhiyun 0x5890, 0x589c,
1342*4882a593Smuzhiyun 0x58a4, 0x58ac,
1343*4882a593Smuzhiyun 0x58b8, 0x58bc,
1344*4882a593Smuzhiyun 0x5940, 0x59c8,
1345*4882a593Smuzhiyun 0x59d0, 0x59dc,
1346*4882a593Smuzhiyun 0x59fc, 0x5a18,
1347*4882a593Smuzhiyun 0x5a60, 0x5a70,
1348*4882a593Smuzhiyun 0x5a80, 0x5a9c,
1349*4882a593Smuzhiyun 0x5b94, 0x5bfc,
1350*4882a593Smuzhiyun 0x6000, 0x6020,
1351*4882a593Smuzhiyun 0x6028, 0x6040,
1352*4882a593Smuzhiyun 0x6058, 0x609c,
1353*4882a593Smuzhiyun 0x60a8, 0x614c,
1354*4882a593Smuzhiyun 0x7700, 0x7798,
1355*4882a593Smuzhiyun 0x77c0, 0x78fc,
1356*4882a593Smuzhiyun 0x7b00, 0x7b58,
1357*4882a593Smuzhiyun 0x7b60, 0x7b84,
1358*4882a593Smuzhiyun 0x7b8c, 0x7c54,
1359*4882a593Smuzhiyun 0x7d00, 0x7d38,
1360*4882a593Smuzhiyun 0x7d40, 0x7d80,
1361*4882a593Smuzhiyun 0x7d8c, 0x7ddc,
1362*4882a593Smuzhiyun 0x7de4, 0x7e04,
1363*4882a593Smuzhiyun 0x7e10, 0x7e1c,
1364*4882a593Smuzhiyun 0x7e24, 0x7e38,
1365*4882a593Smuzhiyun 0x7e40, 0x7e44,
1366*4882a593Smuzhiyun 0x7e4c, 0x7e78,
1367*4882a593Smuzhiyun 0x7e80, 0x7edc,
1368*4882a593Smuzhiyun 0x7ee8, 0x7efc,
1369*4882a593Smuzhiyun 0x8dc0, 0x8de0,
1370*4882a593Smuzhiyun 0x8df8, 0x8e04,
1371*4882a593Smuzhiyun 0x8e10, 0x8e84,
1372*4882a593Smuzhiyun 0x8ea0, 0x8f84,
1373*4882a593Smuzhiyun 0x8fc0, 0x9058,
1374*4882a593Smuzhiyun 0x9060, 0x9060,
1375*4882a593Smuzhiyun 0x9068, 0x90f8,
1376*4882a593Smuzhiyun 0x9400, 0x9408,
1377*4882a593Smuzhiyun 0x9410, 0x9470,
1378*4882a593Smuzhiyun 0x9600, 0x9600,
1379*4882a593Smuzhiyun 0x9608, 0x9638,
1380*4882a593Smuzhiyun 0x9640, 0x96f4,
1381*4882a593Smuzhiyun 0x9800, 0x9808,
1382*4882a593Smuzhiyun 0x9810, 0x9864,
1383*4882a593Smuzhiyun 0x9c00, 0x9c6c,
1384*4882a593Smuzhiyun 0x9c80, 0x9cec,
1385*4882a593Smuzhiyun 0x9d00, 0x9d6c,
1386*4882a593Smuzhiyun 0x9d80, 0x9dec,
1387*4882a593Smuzhiyun 0x9e00, 0x9e6c,
1388*4882a593Smuzhiyun 0x9e80, 0x9eec,
1389*4882a593Smuzhiyun 0x9f00, 0x9f6c,
1390*4882a593Smuzhiyun 0x9f80, 0xa020,
1391*4882a593Smuzhiyun 0xd000, 0xd004,
1392*4882a593Smuzhiyun 0xd010, 0xd03c,
1393*4882a593Smuzhiyun 0xdfc0, 0xdfe0,
1394*4882a593Smuzhiyun 0xe000, 0x1106c,
1395*4882a593Smuzhiyun 0x11074, 0x11088,
1396*4882a593Smuzhiyun 0x1109c, 0x1117c,
1397*4882a593Smuzhiyun 0x11190, 0x11204,
1398*4882a593Smuzhiyun 0x19040, 0x1906c,
1399*4882a593Smuzhiyun 0x19078, 0x19080,
1400*4882a593Smuzhiyun 0x1908c, 0x190e8,
1401*4882a593Smuzhiyun 0x190f0, 0x190f8,
1402*4882a593Smuzhiyun 0x19100, 0x19110,
1403*4882a593Smuzhiyun 0x19120, 0x19124,
1404*4882a593Smuzhiyun 0x19150, 0x19194,
1405*4882a593Smuzhiyun 0x1919c, 0x191b0,
1406*4882a593Smuzhiyun 0x191d0, 0x191e8,
1407*4882a593Smuzhiyun 0x19238, 0x19290,
1408*4882a593Smuzhiyun 0x193f8, 0x19428,
1409*4882a593Smuzhiyun 0x19430, 0x19444,
1410*4882a593Smuzhiyun 0x1944c, 0x1946c,
1411*4882a593Smuzhiyun 0x19474, 0x19474,
1412*4882a593Smuzhiyun 0x19490, 0x194cc,
1413*4882a593Smuzhiyun 0x194f0, 0x194f8,
1414*4882a593Smuzhiyun 0x19c00, 0x19c08,
1415*4882a593Smuzhiyun 0x19c10, 0x19c60,
1416*4882a593Smuzhiyun 0x19c94, 0x19ce4,
1417*4882a593Smuzhiyun 0x19cf0, 0x19d40,
1418*4882a593Smuzhiyun 0x19d50, 0x19d94,
1419*4882a593Smuzhiyun 0x19da0, 0x19de8,
1420*4882a593Smuzhiyun 0x19df0, 0x19e10,
1421*4882a593Smuzhiyun 0x19e50, 0x19e90,
1422*4882a593Smuzhiyun 0x19ea0, 0x19f24,
1423*4882a593Smuzhiyun 0x19f34, 0x19f34,
1424*4882a593Smuzhiyun 0x19f40, 0x19f50,
1425*4882a593Smuzhiyun 0x19f90, 0x19fb4,
1426*4882a593Smuzhiyun 0x19fc4, 0x19fe4,
1427*4882a593Smuzhiyun 0x1a000, 0x1a004,
1428*4882a593Smuzhiyun 0x1a010, 0x1a06c,
1429*4882a593Smuzhiyun 0x1a0b0, 0x1a0e4,
1430*4882a593Smuzhiyun 0x1a0ec, 0x1a0f8,
1431*4882a593Smuzhiyun 0x1a100, 0x1a108,
1432*4882a593Smuzhiyun 0x1a114, 0x1a130,
1433*4882a593Smuzhiyun 0x1a138, 0x1a1c4,
1434*4882a593Smuzhiyun 0x1a1fc, 0x1a1fc,
1435*4882a593Smuzhiyun 0x1e008, 0x1e00c,
1436*4882a593Smuzhiyun 0x1e040, 0x1e044,
1437*4882a593Smuzhiyun 0x1e04c, 0x1e04c,
1438*4882a593Smuzhiyun 0x1e284, 0x1e290,
1439*4882a593Smuzhiyun 0x1e2c0, 0x1e2c0,
1440*4882a593Smuzhiyun 0x1e2e0, 0x1e2e0,
1441*4882a593Smuzhiyun 0x1e300, 0x1e384,
1442*4882a593Smuzhiyun 0x1e3c0, 0x1e3c8,
1443*4882a593Smuzhiyun 0x1e408, 0x1e40c,
1444*4882a593Smuzhiyun 0x1e440, 0x1e444,
1445*4882a593Smuzhiyun 0x1e44c, 0x1e44c,
1446*4882a593Smuzhiyun 0x1e684, 0x1e690,
1447*4882a593Smuzhiyun 0x1e6c0, 0x1e6c0,
1448*4882a593Smuzhiyun 0x1e6e0, 0x1e6e0,
1449*4882a593Smuzhiyun 0x1e700, 0x1e784,
1450*4882a593Smuzhiyun 0x1e7c0, 0x1e7c8,
1451*4882a593Smuzhiyun 0x1e808, 0x1e80c,
1452*4882a593Smuzhiyun 0x1e840, 0x1e844,
1453*4882a593Smuzhiyun 0x1e84c, 0x1e84c,
1454*4882a593Smuzhiyun 0x1ea84, 0x1ea90,
1455*4882a593Smuzhiyun 0x1eac0, 0x1eac0,
1456*4882a593Smuzhiyun 0x1eae0, 0x1eae0,
1457*4882a593Smuzhiyun 0x1eb00, 0x1eb84,
1458*4882a593Smuzhiyun 0x1ebc0, 0x1ebc8,
1459*4882a593Smuzhiyun 0x1ec08, 0x1ec0c,
1460*4882a593Smuzhiyun 0x1ec40, 0x1ec44,
1461*4882a593Smuzhiyun 0x1ec4c, 0x1ec4c,
1462*4882a593Smuzhiyun 0x1ee84, 0x1ee90,
1463*4882a593Smuzhiyun 0x1eec0, 0x1eec0,
1464*4882a593Smuzhiyun 0x1eee0, 0x1eee0,
1465*4882a593Smuzhiyun 0x1ef00, 0x1ef84,
1466*4882a593Smuzhiyun 0x1efc0, 0x1efc8,
1467*4882a593Smuzhiyun 0x1f008, 0x1f00c,
1468*4882a593Smuzhiyun 0x1f040, 0x1f044,
1469*4882a593Smuzhiyun 0x1f04c, 0x1f04c,
1470*4882a593Smuzhiyun 0x1f284, 0x1f290,
1471*4882a593Smuzhiyun 0x1f2c0, 0x1f2c0,
1472*4882a593Smuzhiyun 0x1f2e0, 0x1f2e0,
1473*4882a593Smuzhiyun 0x1f300, 0x1f384,
1474*4882a593Smuzhiyun 0x1f3c0, 0x1f3c8,
1475*4882a593Smuzhiyun 0x1f408, 0x1f40c,
1476*4882a593Smuzhiyun 0x1f440, 0x1f444,
1477*4882a593Smuzhiyun 0x1f44c, 0x1f44c,
1478*4882a593Smuzhiyun 0x1f684, 0x1f690,
1479*4882a593Smuzhiyun 0x1f6c0, 0x1f6c0,
1480*4882a593Smuzhiyun 0x1f6e0, 0x1f6e0,
1481*4882a593Smuzhiyun 0x1f700, 0x1f784,
1482*4882a593Smuzhiyun 0x1f7c0, 0x1f7c8,
1483*4882a593Smuzhiyun 0x1f808, 0x1f80c,
1484*4882a593Smuzhiyun 0x1f840, 0x1f844,
1485*4882a593Smuzhiyun 0x1f84c, 0x1f84c,
1486*4882a593Smuzhiyun 0x1fa84, 0x1fa90,
1487*4882a593Smuzhiyun 0x1fac0, 0x1fac0,
1488*4882a593Smuzhiyun 0x1fae0, 0x1fae0,
1489*4882a593Smuzhiyun 0x1fb00, 0x1fb84,
1490*4882a593Smuzhiyun 0x1fbc0, 0x1fbc8,
1491*4882a593Smuzhiyun 0x1fc08, 0x1fc0c,
1492*4882a593Smuzhiyun 0x1fc40, 0x1fc44,
1493*4882a593Smuzhiyun 0x1fc4c, 0x1fc4c,
1494*4882a593Smuzhiyun 0x1fe84, 0x1fe90,
1495*4882a593Smuzhiyun 0x1fec0, 0x1fec0,
1496*4882a593Smuzhiyun 0x1fee0, 0x1fee0,
1497*4882a593Smuzhiyun 0x1ff00, 0x1ff84,
1498*4882a593Smuzhiyun 0x1ffc0, 0x1ffc8,
1499*4882a593Smuzhiyun 0x30000, 0x30030,
1500*4882a593Smuzhiyun 0x30100, 0x30144,
1501*4882a593Smuzhiyun 0x30190, 0x301a0,
1502*4882a593Smuzhiyun 0x301a8, 0x301b8,
1503*4882a593Smuzhiyun 0x301c4, 0x301c8,
1504*4882a593Smuzhiyun 0x301d0, 0x301d0,
1505*4882a593Smuzhiyun 0x30200, 0x30318,
1506*4882a593Smuzhiyun 0x30400, 0x304b4,
1507*4882a593Smuzhiyun 0x304c0, 0x3052c,
1508*4882a593Smuzhiyun 0x30540, 0x3061c,
1509*4882a593Smuzhiyun 0x30800, 0x30828,
1510*4882a593Smuzhiyun 0x30834, 0x30834,
1511*4882a593Smuzhiyun 0x308c0, 0x30908,
1512*4882a593Smuzhiyun 0x30910, 0x309ac,
1513*4882a593Smuzhiyun 0x30a00, 0x30a14,
1514*4882a593Smuzhiyun 0x30a1c, 0x30a2c,
1515*4882a593Smuzhiyun 0x30a44, 0x30a50,
1516*4882a593Smuzhiyun 0x30a74, 0x30a74,
1517*4882a593Smuzhiyun 0x30a7c, 0x30afc,
1518*4882a593Smuzhiyun 0x30b08, 0x30c24,
1519*4882a593Smuzhiyun 0x30d00, 0x30d00,
1520*4882a593Smuzhiyun 0x30d08, 0x30d14,
1521*4882a593Smuzhiyun 0x30d1c, 0x30d20,
1522*4882a593Smuzhiyun 0x30d3c, 0x30d3c,
1523*4882a593Smuzhiyun 0x30d48, 0x30d50,
1524*4882a593Smuzhiyun 0x31200, 0x3120c,
1525*4882a593Smuzhiyun 0x31220, 0x31220,
1526*4882a593Smuzhiyun 0x31240, 0x31240,
1527*4882a593Smuzhiyun 0x31600, 0x3160c,
1528*4882a593Smuzhiyun 0x31a00, 0x31a1c,
1529*4882a593Smuzhiyun 0x31e00, 0x31e20,
1530*4882a593Smuzhiyun 0x31e38, 0x31e3c,
1531*4882a593Smuzhiyun 0x31e80, 0x31e80,
1532*4882a593Smuzhiyun 0x31e88, 0x31ea8,
1533*4882a593Smuzhiyun 0x31eb0, 0x31eb4,
1534*4882a593Smuzhiyun 0x31ec8, 0x31ed4,
1535*4882a593Smuzhiyun 0x31fb8, 0x32004,
1536*4882a593Smuzhiyun 0x32200, 0x32200,
1537*4882a593Smuzhiyun 0x32208, 0x32240,
1538*4882a593Smuzhiyun 0x32248, 0x32280,
1539*4882a593Smuzhiyun 0x32288, 0x322c0,
1540*4882a593Smuzhiyun 0x322c8, 0x322fc,
1541*4882a593Smuzhiyun 0x32600, 0x32630,
1542*4882a593Smuzhiyun 0x32a00, 0x32abc,
1543*4882a593Smuzhiyun 0x32b00, 0x32b10,
1544*4882a593Smuzhiyun 0x32b20, 0x32b30,
1545*4882a593Smuzhiyun 0x32b40, 0x32b50,
1546*4882a593Smuzhiyun 0x32b60, 0x32b70,
1547*4882a593Smuzhiyun 0x33000, 0x33028,
1548*4882a593Smuzhiyun 0x33030, 0x33048,
1549*4882a593Smuzhiyun 0x33060, 0x33068,
1550*4882a593Smuzhiyun 0x33070, 0x3309c,
1551*4882a593Smuzhiyun 0x330f0, 0x33128,
1552*4882a593Smuzhiyun 0x33130, 0x33148,
1553*4882a593Smuzhiyun 0x33160, 0x33168,
1554*4882a593Smuzhiyun 0x33170, 0x3319c,
1555*4882a593Smuzhiyun 0x331f0, 0x33238,
1556*4882a593Smuzhiyun 0x33240, 0x33240,
1557*4882a593Smuzhiyun 0x33248, 0x33250,
1558*4882a593Smuzhiyun 0x3325c, 0x33264,
1559*4882a593Smuzhiyun 0x33270, 0x332b8,
1560*4882a593Smuzhiyun 0x332c0, 0x332e4,
1561*4882a593Smuzhiyun 0x332f8, 0x33338,
1562*4882a593Smuzhiyun 0x33340, 0x33340,
1563*4882a593Smuzhiyun 0x33348, 0x33350,
1564*4882a593Smuzhiyun 0x3335c, 0x33364,
1565*4882a593Smuzhiyun 0x33370, 0x333b8,
1566*4882a593Smuzhiyun 0x333c0, 0x333e4,
1567*4882a593Smuzhiyun 0x333f8, 0x33428,
1568*4882a593Smuzhiyun 0x33430, 0x33448,
1569*4882a593Smuzhiyun 0x33460, 0x33468,
1570*4882a593Smuzhiyun 0x33470, 0x3349c,
1571*4882a593Smuzhiyun 0x334f0, 0x33528,
1572*4882a593Smuzhiyun 0x33530, 0x33548,
1573*4882a593Smuzhiyun 0x33560, 0x33568,
1574*4882a593Smuzhiyun 0x33570, 0x3359c,
1575*4882a593Smuzhiyun 0x335f0, 0x33638,
1576*4882a593Smuzhiyun 0x33640, 0x33640,
1577*4882a593Smuzhiyun 0x33648, 0x33650,
1578*4882a593Smuzhiyun 0x3365c, 0x33664,
1579*4882a593Smuzhiyun 0x33670, 0x336b8,
1580*4882a593Smuzhiyun 0x336c0, 0x336e4,
1581*4882a593Smuzhiyun 0x336f8, 0x33738,
1582*4882a593Smuzhiyun 0x33740, 0x33740,
1583*4882a593Smuzhiyun 0x33748, 0x33750,
1584*4882a593Smuzhiyun 0x3375c, 0x33764,
1585*4882a593Smuzhiyun 0x33770, 0x337b8,
1586*4882a593Smuzhiyun 0x337c0, 0x337e4,
1587*4882a593Smuzhiyun 0x337f8, 0x337fc,
1588*4882a593Smuzhiyun 0x33814, 0x33814,
1589*4882a593Smuzhiyun 0x3382c, 0x3382c,
1590*4882a593Smuzhiyun 0x33880, 0x3388c,
1591*4882a593Smuzhiyun 0x338e8, 0x338ec,
1592*4882a593Smuzhiyun 0x33900, 0x33928,
1593*4882a593Smuzhiyun 0x33930, 0x33948,
1594*4882a593Smuzhiyun 0x33960, 0x33968,
1595*4882a593Smuzhiyun 0x33970, 0x3399c,
1596*4882a593Smuzhiyun 0x339f0, 0x33a38,
1597*4882a593Smuzhiyun 0x33a40, 0x33a40,
1598*4882a593Smuzhiyun 0x33a48, 0x33a50,
1599*4882a593Smuzhiyun 0x33a5c, 0x33a64,
1600*4882a593Smuzhiyun 0x33a70, 0x33ab8,
1601*4882a593Smuzhiyun 0x33ac0, 0x33ae4,
1602*4882a593Smuzhiyun 0x33af8, 0x33b10,
1603*4882a593Smuzhiyun 0x33b28, 0x33b28,
1604*4882a593Smuzhiyun 0x33b3c, 0x33b50,
1605*4882a593Smuzhiyun 0x33bf0, 0x33c10,
1606*4882a593Smuzhiyun 0x33c28, 0x33c28,
1607*4882a593Smuzhiyun 0x33c3c, 0x33c50,
1608*4882a593Smuzhiyun 0x33cf0, 0x33cfc,
1609*4882a593Smuzhiyun 0x34000, 0x34030,
1610*4882a593Smuzhiyun 0x34100, 0x34144,
1611*4882a593Smuzhiyun 0x34190, 0x341a0,
1612*4882a593Smuzhiyun 0x341a8, 0x341b8,
1613*4882a593Smuzhiyun 0x341c4, 0x341c8,
1614*4882a593Smuzhiyun 0x341d0, 0x341d0,
1615*4882a593Smuzhiyun 0x34200, 0x34318,
1616*4882a593Smuzhiyun 0x34400, 0x344b4,
1617*4882a593Smuzhiyun 0x344c0, 0x3452c,
1618*4882a593Smuzhiyun 0x34540, 0x3461c,
1619*4882a593Smuzhiyun 0x34800, 0x34828,
1620*4882a593Smuzhiyun 0x34834, 0x34834,
1621*4882a593Smuzhiyun 0x348c0, 0x34908,
1622*4882a593Smuzhiyun 0x34910, 0x349ac,
1623*4882a593Smuzhiyun 0x34a00, 0x34a14,
1624*4882a593Smuzhiyun 0x34a1c, 0x34a2c,
1625*4882a593Smuzhiyun 0x34a44, 0x34a50,
1626*4882a593Smuzhiyun 0x34a74, 0x34a74,
1627*4882a593Smuzhiyun 0x34a7c, 0x34afc,
1628*4882a593Smuzhiyun 0x34b08, 0x34c24,
1629*4882a593Smuzhiyun 0x34d00, 0x34d00,
1630*4882a593Smuzhiyun 0x34d08, 0x34d14,
1631*4882a593Smuzhiyun 0x34d1c, 0x34d20,
1632*4882a593Smuzhiyun 0x34d3c, 0x34d3c,
1633*4882a593Smuzhiyun 0x34d48, 0x34d50,
1634*4882a593Smuzhiyun 0x35200, 0x3520c,
1635*4882a593Smuzhiyun 0x35220, 0x35220,
1636*4882a593Smuzhiyun 0x35240, 0x35240,
1637*4882a593Smuzhiyun 0x35600, 0x3560c,
1638*4882a593Smuzhiyun 0x35a00, 0x35a1c,
1639*4882a593Smuzhiyun 0x35e00, 0x35e20,
1640*4882a593Smuzhiyun 0x35e38, 0x35e3c,
1641*4882a593Smuzhiyun 0x35e80, 0x35e80,
1642*4882a593Smuzhiyun 0x35e88, 0x35ea8,
1643*4882a593Smuzhiyun 0x35eb0, 0x35eb4,
1644*4882a593Smuzhiyun 0x35ec8, 0x35ed4,
1645*4882a593Smuzhiyun 0x35fb8, 0x36004,
1646*4882a593Smuzhiyun 0x36200, 0x36200,
1647*4882a593Smuzhiyun 0x36208, 0x36240,
1648*4882a593Smuzhiyun 0x36248, 0x36280,
1649*4882a593Smuzhiyun 0x36288, 0x362c0,
1650*4882a593Smuzhiyun 0x362c8, 0x362fc,
1651*4882a593Smuzhiyun 0x36600, 0x36630,
1652*4882a593Smuzhiyun 0x36a00, 0x36abc,
1653*4882a593Smuzhiyun 0x36b00, 0x36b10,
1654*4882a593Smuzhiyun 0x36b20, 0x36b30,
1655*4882a593Smuzhiyun 0x36b40, 0x36b50,
1656*4882a593Smuzhiyun 0x36b60, 0x36b70,
1657*4882a593Smuzhiyun 0x37000, 0x37028,
1658*4882a593Smuzhiyun 0x37030, 0x37048,
1659*4882a593Smuzhiyun 0x37060, 0x37068,
1660*4882a593Smuzhiyun 0x37070, 0x3709c,
1661*4882a593Smuzhiyun 0x370f0, 0x37128,
1662*4882a593Smuzhiyun 0x37130, 0x37148,
1663*4882a593Smuzhiyun 0x37160, 0x37168,
1664*4882a593Smuzhiyun 0x37170, 0x3719c,
1665*4882a593Smuzhiyun 0x371f0, 0x37238,
1666*4882a593Smuzhiyun 0x37240, 0x37240,
1667*4882a593Smuzhiyun 0x37248, 0x37250,
1668*4882a593Smuzhiyun 0x3725c, 0x37264,
1669*4882a593Smuzhiyun 0x37270, 0x372b8,
1670*4882a593Smuzhiyun 0x372c0, 0x372e4,
1671*4882a593Smuzhiyun 0x372f8, 0x37338,
1672*4882a593Smuzhiyun 0x37340, 0x37340,
1673*4882a593Smuzhiyun 0x37348, 0x37350,
1674*4882a593Smuzhiyun 0x3735c, 0x37364,
1675*4882a593Smuzhiyun 0x37370, 0x373b8,
1676*4882a593Smuzhiyun 0x373c0, 0x373e4,
1677*4882a593Smuzhiyun 0x373f8, 0x37428,
1678*4882a593Smuzhiyun 0x37430, 0x37448,
1679*4882a593Smuzhiyun 0x37460, 0x37468,
1680*4882a593Smuzhiyun 0x37470, 0x3749c,
1681*4882a593Smuzhiyun 0x374f0, 0x37528,
1682*4882a593Smuzhiyun 0x37530, 0x37548,
1683*4882a593Smuzhiyun 0x37560, 0x37568,
1684*4882a593Smuzhiyun 0x37570, 0x3759c,
1685*4882a593Smuzhiyun 0x375f0, 0x37638,
1686*4882a593Smuzhiyun 0x37640, 0x37640,
1687*4882a593Smuzhiyun 0x37648, 0x37650,
1688*4882a593Smuzhiyun 0x3765c, 0x37664,
1689*4882a593Smuzhiyun 0x37670, 0x376b8,
1690*4882a593Smuzhiyun 0x376c0, 0x376e4,
1691*4882a593Smuzhiyun 0x376f8, 0x37738,
1692*4882a593Smuzhiyun 0x37740, 0x37740,
1693*4882a593Smuzhiyun 0x37748, 0x37750,
1694*4882a593Smuzhiyun 0x3775c, 0x37764,
1695*4882a593Smuzhiyun 0x37770, 0x377b8,
1696*4882a593Smuzhiyun 0x377c0, 0x377e4,
1697*4882a593Smuzhiyun 0x377f8, 0x377fc,
1698*4882a593Smuzhiyun 0x37814, 0x37814,
1699*4882a593Smuzhiyun 0x3782c, 0x3782c,
1700*4882a593Smuzhiyun 0x37880, 0x3788c,
1701*4882a593Smuzhiyun 0x378e8, 0x378ec,
1702*4882a593Smuzhiyun 0x37900, 0x37928,
1703*4882a593Smuzhiyun 0x37930, 0x37948,
1704*4882a593Smuzhiyun 0x37960, 0x37968,
1705*4882a593Smuzhiyun 0x37970, 0x3799c,
1706*4882a593Smuzhiyun 0x379f0, 0x37a38,
1707*4882a593Smuzhiyun 0x37a40, 0x37a40,
1708*4882a593Smuzhiyun 0x37a48, 0x37a50,
1709*4882a593Smuzhiyun 0x37a5c, 0x37a64,
1710*4882a593Smuzhiyun 0x37a70, 0x37ab8,
1711*4882a593Smuzhiyun 0x37ac0, 0x37ae4,
1712*4882a593Smuzhiyun 0x37af8, 0x37b10,
1713*4882a593Smuzhiyun 0x37b28, 0x37b28,
1714*4882a593Smuzhiyun 0x37b3c, 0x37b50,
1715*4882a593Smuzhiyun 0x37bf0, 0x37c10,
1716*4882a593Smuzhiyun 0x37c28, 0x37c28,
1717*4882a593Smuzhiyun 0x37c3c, 0x37c50,
1718*4882a593Smuzhiyun 0x37cf0, 0x37cfc,
1719*4882a593Smuzhiyun 0x38000, 0x38030,
1720*4882a593Smuzhiyun 0x38100, 0x38144,
1721*4882a593Smuzhiyun 0x38190, 0x381a0,
1722*4882a593Smuzhiyun 0x381a8, 0x381b8,
1723*4882a593Smuzhiyun 0x381c4, 0x381c8,
1724*4882a593Smuzhiyun 0x381d0, 0x381d0,
1725*4882a593Smuzhiyun 0x38200, 0x38318,
1726*4882a593Smuzhiyun 0x38400, 0x384b4,
1727*4882a593Smuzhiyun 0x384c0, 0x3852c,
1728*4882a593Smuzhiyun 0x38540, 0x3861c,
1729*4882a593Smuzhiyun 0x38800, 0x38828,
1730*4882a593Smuzhiyun 0x38834, 0x38834,
1731*4882a593Smuzhiyun 0x388c0, 0x38908,
1732*4882a593Smuzhiyun 0x38910, 0x389ac,
1733*4882a593Smuzhiyun 0x38a00, 0x38a14,
1734*4882a593Smuzhiyun 0x38a1c, 0x38a2c,
1735*4882a593Smuzhiyun 0x38a44, 0x38a50,
1736*4882a593Smuzhiyun 0x38a74, 0x38a74,
1737*4882a593Smuzhiyun 0x38a7c, 0x38afc,
1738*4882a593Smuzhiyun 0x38b08, 0x38c24,
1739*4882a593Smuzhiyun 0x38d00, 0x38d00,
1740*4882a593Smuzhiyun 0x38d08, 0x38d14,
1741*4882a593Smuzhiyun 0x38d1c, 0x38d20,
1742*4882a593Smuzhiyun 0x38d3c, 0x38d3c,
1743*4882a593Smuzhiyun 0x38d48, 0x38d50,
1744*4882a593Smuzhiyun 0x39200, 0x3920c,
1745*4882a593Smuzhiyun 0x39220, 0x39220,
1746*4882a593Smuzhiyun 0x39240, 0x39240,
1747*4882a593Smuzhiyun 0x39600, 0x3960c,
1748*4882a593Smuzhiyun 0x39a00, 0x39a1c,
1749*4882a593Smuzhiyun 0x39e00, 0x39e20,
1750*4882a593Smuzhiyun 0x39e38, 0x39e3c,
1751*4882a593Smuzhiyun 0x39e80, 0x39e80,
1752*4882a593Smuzhiyun 0x39e88, 0x39ea8,
1753*4882a593Smuzhiyun 0x39eb0, 0x39eb4,
1754*4882a593Smuzhiyun 0x39ec8, 0x39ed4,
1755*4882a593Smuzhiyun 0x39fb8, 0x3a004,
1756*4882a593Smuzhiyun 0x3a200, 0x3a200,
1757*4882a593Smuzhiyun 0x3a208, 0x3a240,
1758*4882a593Smuzhiyun 0x3a248, 0x3a280,
1759*4882a593Smuzhiyun 0x3a288, 0x3a2c0,
1760*4882a593Smuzhiyun 0x3a2c8, 0x3a2fc,
1761*4882a593Smuzhiyun 0x3a600, 0x3a630,
1762*4882a593Smuzhiyun 0x3aa00, 0x3aabc,
1763*4882a593Smuzhiyun 0x3ab00, 0x3ab10,
1764*4882a593Smuzhiyun 0x3ab20, 0x3ab30,
1765*4882a593Smuzhiyun 0x3ab40, 0x3ab50,
1766*4882a593Smuzhiyun 0x3ab60, 0x3ab70,
1767*4882a593Smuzhiyun 0x3b000, 0x3b028,
1768*4882a593Smuzhiyun 0x3b030, 0x3b048,
1769*4882a593Smuzhiyun 0x3b060, 0x3b068,
1770*4882a593Smuzhiyun 0x3b070, 0x3b09c,
1771*4882a593Smuzhiyun 0x3b0f0, 0x3b128,
1772*4882a593Smuzhiyun 0x3b130, 0x3b148,
1773*4882a593Smuzhiyun 0x3b160, 0x3b168,
1774*4882a593Smuzhiyun 0x3b170, 0x3b19c,
1775*4882a593Smuzhiyun 0x3b1f0, 0x3b238,
1776*4882a593Smuzhiyun 0x3b240, 0x3b240,
1777*4882a593Smuzhiyun 0x3b248, 0x3b250,
1778*4882a593Smuzhiyun 0x3b25c, 0x3b264,
1779*4882a593Smuzhiyun 0x3b270, 0x3b2b8,
1780*4882a593Smuzhiyun 0x3b2c0, 0x3b2e4,
1781*4882a593Smuzhiyun 0x3b2f8, 0x3b338,
1782*4882a593Smuzhiyun 0x3b340, 0x3b340,
1783*4882a593Smuzhiyun 0x3b348, 0x3b350,
1784*4882a593Smuzhiyun 0x3b35c, 0x3b364,
1785*4882a593Smuzhiyun 0x3b370, 0x3b3b8,
1786*4882a593Smuzhiyun 0x3b3c0, 0x3b3e4,
1787*4882a593Smuzhiyun 0x3b3f8, 0x3b428,
1788*4882a593Smuzhiyun 0x3b430, 0x3b448,
1789*4882a593Smuzhiyun 0x3b460, 0x3b468,
1790*4882a593Smuzhiyun 0x3b470, 0x3b49c,
1791*4882a593Smuzhiyun 0x3b4f0, 0x3b528,
1792*4882a593Smuzhiyun 0x3b530, 0x3b548,
1793*4882a593Smuzhiyun 0x3b560, 0x3b568,
1794*4882a593Smuzhiyun 0x3b570, 0x3b59c,
1795*4882a593Smuzhiyun 0x3b5f0, 0x3b638,
1796*4882a593Smuzhiyun 0x3b640, 0x3b640,
1797*4882a593Smuzhiyun 0x3b648, 0x3b650,
1798*4882a593Smuzhiyun 0x3b65c, 0x3b664,
1799*4882a593Smuzhiyun 0x3b670, 0x3b6b8,
1800*4882a593Smuzhiyun 0x3b6c0, 0x3b6e4,
1801*4882a593Smuzhiyun 0x3b6f8, 0x3b738,
1802*4882a593Smuzhiyun 0x3b740, 0x3b740,
1803*4882a593Smuzhiyun 0x3b748, 0x3b750,
1804*4882a593Smuzhiyun 0x3b75c, 0x3b764,
1805*4882a593Smuzhiyun 0x3b770, 0x3b7b8,
1806*4882a593Smuzhiyun 0x3b7c0, 0x3b7e4,
1807*4882a593Smuzhiyun 0x3b7f8, 0x3b7fc,
1808*4882a593Smuzhiyun 0x3b814, 0x3b814,
1809*4882a593Smuzhiyun 0x3b82c, 0x3b82c,
1810*4882a593Smuzhiyun 0x3b880, 0x3b88c,
1811*4882a593Smuzhiyun 0x3b8e8, 0x3b8ec,
1812*4882a593Smuzhiyun 0x3b900, 0x3b928,
1813*4882a593Smuzhiyun 0x3b930, 0x3b948,
1814*4882a593Smuzhiyun 0x3b960, 0x3b968,
1815*4882a593Smuzhiyun 0x3b970, 0x3b99c,
1816*4882a593Smuzhiyun 0x3b9f0, 0x3ba38,
1817*4882a593Smuzhiyun 0x3ba40, 0x3ba40,
1818*4882a593Smuzhiyun 0x3ba48, 0x3ba50,
1819*4882a593Smuzhiyun 0x3ba5c, 0x3ba64,
1820*4882a593Smuzhiyun 0x3ba70, 0x3bab8,
1821*4882a593Smuzhiyun 0x3bac0, 0x3bae4,
1822*4882a593Smuzhiyun 0x3baf8, 0x3bb10,
1823*4882a593Smuzhiyun 0x3bb28, 0x3bb28,
1824*4882a593Smuzhiyun 0x3bb3c, 0x3bb50,
1825*4882a593Smuzhiyun 0x3bbf0, 0x3bc10,
1826*4882a593Smuzhiyun 0x3bc28, 0x3bc28,
1827*4882a593Smuzhiyun 0x3bc3c, 0x3bc50,
1828*4882a593Smuzhiyun 0x3bcf0, 0x3bcfc,
1829*4882a593Smuzhiyun 0x3c000, 0x3c030,
1830*4882a593Smuzhiyun 0x3c100, 0x3c144,
1831*4882a593Smuzhiyun 0x3c190, 0x3c1a0,
1832*4882a593Smuzhiyun 0x3c1a8, 0x3c1b8,
1833*4882a593Smuzhiyun 0x3c1c4, 0x3c1c8,
1834*4882a593Smuzhiyun 0x3c1d0, 0x3c1d0,
1835*4882a593Smuzhiyun 0x3c200, 0x3c318,
1836*4882a593Smuzhiyun 0x3c400, 0x3c4b4,
1837*4882a593Smuzhiyun 0x3c4c0, 0x3c52c,
1838*4882a593Smuzhiyun 0x3c540, 0x3c61c,
1839*4882a593Smuzhiyun 0x3c800, 0x3c828,
1840*4882a593Smuzhiyun 0x3c834, 0x3c834,
1841*4882a593Smuzhiyun 0x3c8c0, 0x3c908,
1842*4882a593Smuzhiyun 0x3c910, 0x3c9ac,
1843*4882a593Smuzhiyun 0x3ca00, 0x3ca14,
1844*4882a593Smuzhiyun 0x3ca1c, 0x3ca2c,
1845*4882a593Smuzhiyun 0x3ca44, 0x3ca50,
1846*4882a593Smuzhiyun 0x3ca74, 0x3ca74,
1847*4882a593Smuzhiyun 0x3ca7c, 0x3cafc,
1848*4882a593Smuzhiyun 0x3cb08, 0x3cc24,
1849*4882a593Smuzhiyun 0x3cd00, 0x3cd00,
1850*4882a593Smuzhiyun 0x3cd08, 0x3cd14,
1851*4882a593Smuzhiyun 0x3cd1c, 0x3cd20,
1852*4882a593Smuzhiyun 0x3cd3c, 0x3cd3c,
1853*4882a593Smuzhiyun 0x3cd48, 0x3cd50,
1854*4882a593Smuzhiyun 0x3d200, 0x3d20c,
1855*4882a593Smuzhiyun 0x3d220, 0x3d220,
1856*4882a593Smuzhiyun 0x3d240, 0x3d240,
1857*4882a593Smuzhiyun 0x3d600, 0x3d60c,
1858*4882a593Smuzhiyun 0x3da00, 0x3da1c,
1859*4882a593Smuzhiyun 0x3de00, 0x3de20,
1860*4882a593Smuzhiyun 0x3de38, 0x3de3c,
1861*4882a593Smuzhiyun 0x3de80, 0x3de80,
1862*4882a593Smuzhiyun 0x3de88, 0x3dea8,
1863*4882a593Smuzhiyun 0x3deb0, 0x3deb4,
1864*4882a593Smuzhiyun 0x3dec8, 0x3ded4,
1865*4882a593Smuzhiyun 0x3dfb8, 0x3e004,
1866*4882a593Smuzhiyun 0x3e200, 0x3e200,
1867*4882a593Smuzhiyun 0x3e208, 0x3e240,
1868*4882a593Smuzhiyun 0x3e248, 0x3e280,
1869*4882a593Smuzhiyun 0x3e288, 0x3e2c0,
1870*4882a593Smuzhiyun 0x3e2c8, 0x3e2fc,
1871*4882a593Smuzhiyun 0x3e600, 0x3e630,
1872*4882a593Smuzhiyun 0x3ea00, 0x3eabc,
1873*4882a593Smuzhiyun 0x3eb00, 0x3eb10,
1874*4882a593Smuzhiyun 0x3eb20, 0x3eb30,
1875*4882a593Smuzhiyun 0x3eb40, 0x3eb50,
1876*4882a593Smuzhiyun 0x3eb60, 0x3eb70,
1877*4882a593Smuzhiyun 0x3f000, 0x3f028,
1878*4882a593Smuzhiyun 0x3f030, 0x3f048,
1879*4882a593Smuzhiyun 0x3f060, 0x3f068,
1880*4882a593Smuzhiyun 0x3f070, 0x3f09c,
1881*4882a593Smuzhiyun 0x3f0f0, 0x3f128,
1882*4882a593Smuzhiyun 0x3f130, 0x3f148,
1883*4882a593Smuzhiyun 0x3f160, 0x3f168,
1884*4882a593Smuzhiyun 0x3f170, 0x3f19c,
1885*4882a593Smuzhiyun 0x3f1f0, 0x3f238,
1886*4882a593Smuzhiyun 0x3f240, 0x3f240,
1887*4882a593Smuzhiyun 0x3f248, 0x3f250,
1888*4882a593Smuzhiyun 0x3f25c, 0x3f264,
1889*4882a593Smuzhiyun 0x3f270, 0x3f2b8,
1890*4882a593Smuzhiyun 0x3f2c0, 0x3f2e4,
1891*4882a593Smuzhiyun 0x3f2f8, 0x3f338,
1892*4882a593Smuzhiyun 0x3f340, 0x3f340,
1893*4882a593Smuzhiyun 0x3f348, 0x3f350,
1894*4882a593Smuzhiyun 0x3f35c, 0x3f364,
1895*4882a593Smuzhiyun 0x3f370, 0x3f3b8,
1896*4882a593Smuzhiyun 0x3f3c0, 0x3f3e4,
1897*4882a593Smuzhiyun 0x3f3f8, 0x3f428,
1898*4882a593Smuzhiyun 0x3f430, 0x3f448,
1899*4882a593Smuzhiyun 0x3f460, 0x3f468,
1900*4882a593Smuzhiyun 0x3f470, 0x3f49c,
1901*4882a593Smuzhiyun 0x3f4f0, 0x3f528,
1902*4882a593Smuzhiyun 0x3f530, 0x3f548,
1903*4882a593Smuzhiyun 0x3f560, 0x3f568,
1904*4882a593Smuzhiyun 0x3f570, 0x3f59c,
1905*4882a593Smuzhiyun 0x3f5f0, 0x3f638,
1906*4882a593Smuzhiyun 0x3f640, 0x3f640,
1907*4882a593Smuzhiyun 0x3f648, 0x3f650,
1908*4882a593Smuzhiyun 0x3f65c, 0x3f664,
1909*4882a593Smuzhiyun 0x3f670, 0x3f6b8,
1910*4882a593Smuzhiyun 0x3f6c0, 0x3f6e4,
1911*4882a593Smuzhiyun 0x3f6f8, 0x3f738,
1912*4882a593Smuzhiyun 0x3f740, 0x3f740,
1913*4882a593Smuzhiyun 0x3f748, 0x3f750,
1914*4882a593Smuzhiyun 0x3f75c, 0x3f764,
1915*4882a593Smuzhiyun 0x3f770, 0x3f7b8,
1916*4882a593Smuzhiyun 0x3f7c0, 0x3f7e4,
1917*4882a593Smuzhiyun 0x3f7f8, 0x3f7fc,
1918*4882a593Smuzhiyun 0x3f814, 0x3f814,
1919*4882a593Smuzhiyun 0x3f82c, 0x3f82c,
1920*4882a593Smuzhiyun 0x3f880, 0x3f88c,
1921*4882a593Smuzhiyun 0x3f8e8, 0x3f8ec,
1922*4882a593Smuzhiyun 0x3f900, 0x3f928,
1923*4882a593Smuzhiyun 0x3f930, 0x3f948,
1924*4882a593Smuzhiyun 0x3f960, 0x3f968,
1925*4882a593Smuzhiyun 0x3f970, 0x3f99c,
1926*4882a593Smuzhiyun 0x3f9f0, 0x3fa38,
1927*4882a593Smuzhiyun 0x3fa40, 0x3fa40,
1928*4882a593Smuzhiyun 0x3fa48, 0x3fa50,
1929*4882a593Smuzhiyun 0x3fa5c, 0x3fa64,
1930*4882a593Smuzhiyun 0x3fa70, 0x3fab8,
1931*4882a593Smuzhiyun 0x3fac0, 0x3fae4,
1932*4882a593Smuzhiyun 0x3faf8, 0x3fb10,
1933*4882a593Smuzhiyun 0x3fb28, 0x3fb28,
1934*4882a593Smuzhiyun 0x3fb3c, 0x3fb50,
1935*4882a593Smuzhiyun 0x3fbf0, 0x3fc10,
1936*4882a593Smuzhiyun 0x3fc28, 0x3fc28,
1937*4882a593Smuzhiyun 0x3fc3c, 0x3fc50,
1938*4882a593Smuzhiyun 0x3fcf0, 0x3fcfc,
1939*4882a593Smuzhiyun 0x40000, 0x4000c,
1940*4882a593Smuzhiyun 0x40040, 0x40050,
1941*4882a593Smuzhiyun 0x40060, 0x40068,
1942*4882a593Smuzhiyun 0x4007c, 0x4008c,
1943*4882a593Smuzhiyun 0x40094, 0x400b0,
1944*4882a593Smuzhiyun 0x400c0, 0x40144,
1945*4882a593Smuzhiyun 0x40180, 0x4018c,
1946*4882a593Smuzhiyun 0x40200, 0x40254,
1947*4882a593Smuzhiyun 0x40260, 0x40264,
1948*4882a593Smuzhiyun 0x40270, 0x40288,
1949*4882a593Smuzhiyun 0x40290, 0x40298,
1950*4882a593Smuzhiyun 0x402ac, 0x402c8,
1951*4882a593Smuzhiyun 0x402d0, 0x402e0,
1952*4882a593Smuzhiyun 0x402f0, 0x402f0,
1953*4882a593Smuzhiyun 0x40300, 0x4033c,
1954*4882a593Smuzhiyun 0x403f8, 0x403fc,
1955*4882a593Smuzhiyun 0x41304, 0x413c4,
1956*4882a593Smuzhiyun 0x41400, 0x4140c,
1957*4882a593Smuzhiyun 0x41414, 0x4141c,
1958*4882a593Smuzhiyun 0x41480, 0x414d0,
1959*4882a593Smuzhiyun 0x44000, 0x44054,
1960*4882a593Smuzhiyun 0x4405c, 0x44078,
1961*4882a593Smuzhiyun 0x440c0, 0x44174,
1962*4882a593Smuzhiyun 0x44180, 0x441ac,
1963*4882a593Smuzhiyun 0x441b4, 0x441b8,
1964*4882a593Smuzhiyun 0x441c0, 0x44254,
1965*4882a593Smuzhiyun 0x4425c, 0x44278,
1966*4882a593Smuzhiyun 0x442c0, 0x44374,
1967*4882a593Smuzhiyun 0x44380, 0x443ac,
1968*4882a593Smuzhiyun 0x443b4, 0x443b8,
1969*4882a593Smuzhiyun 0x443c0, 0x44454,
1970*4882a593Smuzhiyun 0x4445c, 0x44478,
1971*4882a593Smuzhiyun 0x444c0, 0x44574,
1972*4882a593Smuzhiyun 0x44580, 0x445ac,
1973*4882a593Smuzhiyun 0x445b4, 0x445b8,
1974*4882a593Smuzhiyun 0x445c0, 0x44654,
1975*4882a593Smuzhiyun 0x4465c, 0x44678,
1976*4882a593Smuzhiyun 0x446c0, 0x44774,
1977*4882a593Smuzhiyun 0x44780, 0x447ac,
1978*4882a593Smuzhiyun 0x447b4, 0x447b8,
1979*4882a593Smuzhiyun 0x447c0, 0x44854,
1980*4882a593Smuzhiyun 0x4485c, 0x44878,
1981*4882a593Smuzhiyun 0x448c0, 0x44974,
1982*4882a593Smuzhiyun 0x44980, 0x449ac,
1983*4882a593Smuzhiyun 0x449b4, 0x449b8,
1984*4882a593Smuzhiyun 0x449c0, 0x449fc,
1985*4882a593Smuzhiyun 0x45000, 0x45004,
1986*4882a593Smuzhiyun 0x45010, 0x45030,
1987*4882a593Smuzhiyun 0x45040, 0x45060,
1988*4882a593Smuzhiyun 0x45068, 0x45068,
1989*4882a593Smuzhiyun 0x45080, 0x45084,
1990*4882a593Smuzhiyun 0x450a0, 0x450b0,
1991*4882a593Smuzhiyun 0x45200, 0x45204,
1992*4882a593Smuzhiyun 0x45210, 0x45230,
1993*4882a593Smuzhiyun 0x45240, 0x45260,
1994*4882a593Smuzhiyun 0x45268, 0x45268,
1995*4882a593Smuzhiyun 0x45280, 0x45284,
1996*4882a593Smuzhiyun 0x452a0, 0x452b0,
1997*4882a593Smuzhiyun 0x460c0, 0x460e4,
1998*4882a593Smuzhiyun 0x47000, 0x4703c,
1999*4882a593Smuzhiyun 0x47044, 0x4708c,
2000*4882a593Smuzhiyun 0x47200, 0x47250,
2001*4882a593Smuzhiyun 0x47400, 0x47408,
2002*4882a593Smuzhiyun 0x47414, 0x47420,
2003*4882a593Smuzhiyun 0x47600, 0x47618,
2004*4882a593Smuzhiyun 0x47800, 0x47814,
2005*4882a593Smuzhiyun 0x48000, 0x4800c,
2006*4882a593Smuzhiyun 0x48040, 0x48050,
2007*4882a593Smuzhiyun 0x48060, 0x48068,
2008*4882a593Smuzhiyun 0x4807c, 0x4808c,
2009*4882a593Smuzhiyun 0x48094, 0x480b0,
2010*4882a593Smuzhiyun 0x480c0, 0x48144,
2011*4882a593Smuzhiyun 0x48180, 0x4818c,
2012*4882a593Smuzhiyun 0x48200, 0x48254,
2013*4882a593Smuzhiyun 0x48260, 0x48264,
2014*4882a593Smuzhiyun 0x48270, 0x48288,
2015*4882a593Smuzhiyun 0x48290, 0x48298,
2016*4882a593Smuzhiyun 0x482ac, 0x482c8,
2017*4882a593Smuzhiyun 0x482d0, 0x482e0,
2018*4882a593Smuzhiyun 0x482f0, 0x482f0,
2019*4882a593Smuzhiyun 0x48300, 0x4833c,
2020*4882a593Smuzhiyun 0x483f8, 0x483fc,
2021*4882a593Smuzhiyun 0x49304, 0x493c4,
2022*4882a593Smuzhiyun 0x49400, 0x4940c,
2023*4882a593Smuzhiyun 0x49414, 0x4941c,
2024*4882a593Smuzhiyun 0x49480, 0x494d0,
2025*4882a593Smuzhiyun 0x4c000, 0x4c054,
2026*4882a593Smuzhiyun 0x4c05c, 0x4c078,
2027*4882a593Smuzhiyun 0x4c0c0, 0x4c174,
2028*4882a593Smuzhiyun 0x4c180, 0x4c1ac,
2029*4882a593Smuzhiyun 0x4c1b4, 0x4c1b8,
2030*4882a593Smuzhiyun 0x4c1c0, 0x4c254,
2031*4882a593Smuzhiyun 0x4c25c, 0x4c278,
2032*4882a593Smuzhiyun 0x4c2c0, 0x4c374,
2033*4882a593Smuzhiyun 0x4c380, 0x4c3ac,
2034*4882a593Smuzhiyun 0x4c3b4, 0x4c3b8,
2035*4882a593Smuzhiyun 0x4c3c0, 0x4c454,
2036*4882a593Smuzhiyun 0x4c45c, 0x4c478,
2037*4882a593Smuzhiyun 0x4c4c0, 0x4c574,
2038*4882a593Smuzhiyun 0x4c580, 0x4c5ac,
2039*4882a593Smuzhiyun 0x4c5b4, 0x4c5b8,
2040*4882a593Smuzhiyun 0x4c5c0, 0x4c654,
2041*4882a593Smuzhiyun 0x4c65c, 0x4c678,
2042*4882a593Smuzhiyun 0x4c6c0, 0x4c774,
2043*4882a593Smuzhiyun 0x4c780, 0x4c7ac,
2044*4882a593Smuzhiyun 0x4c7b4, 0x4c7b8,
2045*4882a593Smuzhiyun 0x4c7c0, 0x4c854,
2046*4882a593Smuzhiyun 0x4c85c, 0x4c878,
2047*4882a593Smuzhiyun 0x4c8c0, 0x4c974,
2048*4882a593Smuzhiyun 0x4c980, 0x4c9ac,
2049*4882a593Smuzhiyun 0x4c9b4, 0x4c9b8,
2050*4882a593Smuzhiyun 0x4c9c0, 0x4c9fc,
2051*4882a593Smuzhiyun 0x4d000, 0x4d004,
2052*4882a593Smuzhiyun 0x4d010, 0x4d030,
2053*4882a593Smuzhiyun 0x4d040, 0x4d060,
2054*4882a593Smuzhiyun 0x4d068, 0x4d068,
2055*4882a593Smuzhiyun 0x4d080, 0x4d084,
2056*4882a593Smuzhiyun 0x4d0a0, 0x4d0b0,
2057*4882a593Smuzhiyun 0x4d200, 0x4d204,
2058*4882a593Smuzhiyun 0x4d210, 0x4d230,
2059*4882a593Smuzhiyun 0x4d240, 0x4d260,
2060*4882a593Smuzhiyun 0x4d268, 0x4d268,
2061*4882a593Smuzhiyun 0x4d280, 0x4d284,
2062*4882a593Smuzhiyun 0x4d2a0, 0x4d2b0,
2063*4882a593Smuzhiyun 0x4e0c0, 0x4e0e4,
2064*4882a593Smuzhiyun 0x4f000, 0x4f03c,
2065*4882a593Smuzhiyun 0x4f044, 0x4f08c,
2066*4882a593Smuzhiyun 0x4f200, 0x4f250,
2067*4882a593Smuzhiyun 0x4f400, 0x4f408,
2068*4882a593Smuzhiyun 0x4f414, 0x4f420,
2069*4882a593Smuzhiyun 0x4f600, 0x4f618,
2070*4882a593Smuzhiyun 0x4f800, 0x4f814,
2071*4882a593Smuzhiyun 0x50000, 0x50084,
2072*4882a593Smuzhiyun 0x50090, 0x500cc,
2073*4882a593Smuzhiyun 0x50400, 0x50400,
2074*4882a593Smuzhiyun 0x50800, 0x50884,
2075*4882a593Smuzhiyun 0x50890, 0x508cc,
2076*4882a593Smuzhiyun 0x50c00, 0x50c00,
2077*4882a593Smuzhiyun 0x51000, 0x5101c,
2078*4882a593Smuzhiyun 0x51300, 0x51308,
2079*4882a593Smuzhiyun };
2080*4882a593Smuzhiyun
2081*4882a593Smuzhiyun static const unsigned int t6_reg_ranges[] = {
2082*4882a593Smuzhiyun 0x1008, 0x101c,
2083*4882a593Smuzhiyun 0x1024, 0x10a8,
2084*4882a593Smuzhiyun 0x10b4, 0x10f8,
2085*4882a593Smuzhiyun 0x1100, 0x1114,
2086*4882a593Smuzhiyun 0x111c, 0x112c,
2087*4882a593Smuzhiyun 0x1138, 0x113c,
2088*4882a593Smuzhiyun 0x1144, 0x114c,
2089*4882a593Smuzhiyun 0x1180, 0x1184,
2090*4882a593Smuzhiyun 0x1190, 0x1194,
2091*4882a593Smuzhiyun 0x11a0, 0x11a4,
2092*4882a593Smuzhiyun 0x11b0, 0x11b4,
2093*4882a593Smuzhiyun 0x11fc, 0x123c,
2094*4882a593Smuzhiyun 0x1254, 0x1274,
2095*4882a593Smuzhiyun 0x1280, 0x133c,
2096*4882a593Smuzhiyun 0x1800, 0x18fc,
2097*4882a593Smuzhiyun 0x3000, 0x302c,
2098*4882a593Smuzhiyun 0x3060, 0x30b0,
2099*4882a593Smuzhiyun 0x30b8, 0x30d8,
2100*4882a593Smuzhiyun 0x30e0, 0x30fc,
2101*4882a593Smuzhiyun 0x3140, 0x357c,
2102*4882a593Smuzhiyun 0x35a8, 0x35cc,
2103*4882a593Smuzhiyun 0x35ec, 0x35ec,
2104*4882a593Smuzhiyun 0x3600, 0x5624,
2105*4882a593Smuzhiyun 0x56cc, 0x56ec,
2106*4882a593Smuzhiyun 0x56f4, 0x5720,
2107*4882a593Smuzhiyun 0x5728, 0x575c,
2108*4882a593Smuzhiyun 0x580c, 0x5814,
2109*4882a593Smuzhiyun 0x5890, 0x589c,
2110*4882a593Smuzhiyun 0x58a4, 0x58ac,
2111*4882a593Smuzhiyun 0x58b8, 0x58bc,
2112*4882a593Smuzhiyun 0x5940, 0x595c,
2113*4882a593Smuzhiyun 0x5980, 0x598c,
2114*4882a593Smuzhiyun 0x59b0, 0x59c8,
2115*4882a593Smuzhiyun 0x59d0, 0x59dc,
2116*4882a593Smuzhiyun 0x59fc, 0x5a18,
2117*4882a593Smuzhiyun 0x5a60, 0x5a6c,
2118*4882a593Smuzhiyun 0x5a80, 0x5a8c,
2119*4882a593Smuzhiyun 0x5a94, 0x5a9c,
2120*4882a593Smuzhiyun 0x5b94, 0x5bfc,
2121*4882a593Smuzhiyun 0x5c10, 0x5e48,
2122*4882a593Smuzhiyun 0x5e50, 0x5e94,
2123*4882a593Smuzhiyun 0x5ea0, 0x5eb0,
2124*4882a593Smuzhiyun 0x5ec0, 0x5ec0,
2125*4882a593Smuzhiyun 0x5ec8, 0x5ed0,
2126*4882a593Smuzhiyun 0x5ee0, 0x5ee0,
2127*4882a593Smuzhiyun 0x5ef0, 0x5ef0,
2128*4882a593Smuzhiyun 0x5f00, 0x5f00,
2129*4882a593Smuzhiyun 0x6000, 0x6020,
2130*4882a593Smuzhiyun 0x6028, 0x6040,
2131*4882a593Smuzhiyun 0x6058, 0x609c,
2132*4882a593Smuzhiyun 0x60a8, 0x619c,
2133*4882a593Smuzhiyun 0x7700, 0x7798,
2134*4882a593Smuzhiyun 0x77c0, 0x7880,
2135*4882a593Smuzhiyun 0x78cc, 0x78fc,
2136*4882a593Smuzhiyun 0x7b00, 0x7b58,
2137*4882a593Smuzhiyun 0x7b60, 0x7b84,
2138*4882a593Smuzhiyun 0x7b8c, 0x7c54,
2139*4882a593Smuzhiyun 0x7d00, 0x7d38,
2140*4882a593Smuzhiyun 0x7d40, 0x7d84,
2141*4882a593Smuzhiyun 0x7d8c, 0x7ddc,
2142*4882a593Smuzhiyun 0x7de4, 0x7e04,
2143*4882a593Smuzhiyun 0x7e10, 0x7e1c,
2144*4882a593Smuzhiyun 0x7e24, 0x7e38,
2145*4882a593Smuzhiyun 0x7e40, 0x7e44,
2146*4882a593Smuzhiyun 0x7e4c, 0x7e78,
2147*4882a593Smuzhiyun 0x7e80, 0x7edc,
2148*4882a593Smuzhiyun 0x7ee8, 0x7efc,
2149*4882a593Smuzhiyun 0x8dc0, 0x8de4,
2150*4882a593Smuzhiyun 0x8df8, 0x8e04,
2151*4882a593Smuzhiyun 0x8e10, 0x8e84,
2152*4882a593Smuzhiyun 0x8ea0, 0x8f88,
2153*4882a593Smuzhiyun 0x8fb8, 0x9058,
2154*4882a593Smuzhiyun 0x9060, 0x9060,
2155*4882a593Smuzhiyun 0x9068, 0x90f8,
2156*4882a593Smuzhiyun 0x9100, 0x9124,
2157*4882a593Smuzhiyun 0x9400, 0x9470,
2158*4882a593Smuzhiyun 0x9600, 0x9600,
2159*4882a593Smuzhiyun 0x9608, 0x9638,
2160*4882a593Smuzhiyun 0x9640, 0x9704,
2161*4882a593Smuzhiyun 0x9710, 0x971c,
2162*4882a593Smuzhiyun 0x9800, 0x9808,
2163*4882a593Smuzhiyun 0x9810, 0x9864,
2164*4882a593Smuzhiyun 0x9c00, 0x9c6c,
2165*4882a593Smuzhiyun 0x9c80, 0x9cec,
2166*4882a593Smuzhiyun 0x9d00, 0x9d6c,
2167*4882a593Smuzhiyun 0x9d80, 0x9dec,
2168*4882a593Smuzhiyun 0x9e00, 0x9e6c,
2169*4882a593Smuzhiyun 0x9e80, 0x9eec,
2170*4882a593Smuzhiyun 0x9f00, 0x9f6c,
2171*4882a593Smuzhiyun 0x9f80, 0xa020,
2172*4882a593Smuzhiyun 0xd000, 0xd03c,
2173*4882a593Smuzhiyun 0xd100, 0xd118,
2174*4882a593Smuzhiyun 0xd200, 0xd214,
2175*4882a593Smuzhiyun 0xd220, 0xd234,
2176*4882a593Smuzhiyun 0xd240, 0xd254,
2177*4882a593Smuzhiyun 0xd260, 0xd274,
2178*4882a593Smuzhiyun 0xd280, 0xd294,
2179*4882a593Smuzhiyun 0xd2a0, 0xd2b4,
2180*4882a593Smuzhiyun 0xd2c0, 0xd2d4,
2181*4882a593Smuzhiyun 0xd2e0, 0xd2f4,
2182*4882a593Smuzhiyun 0xd300, 0xd31c,
2183*4882a593Smuzhiyun 0xdfc0, 0xdfe0,
2184*4882a593Smuzhiyun 0xe000, 0xf008,
2185*4882a593Smuzhiyun 0xf010, 0xf018,
2186*4882a593Smuzhiyun 0xf020, 0xf028,
2187*4882a593Smuzhiyun 0x11000, 0x11014,
2188*4882a593Smuzhiyun 0x11048, 0x1106c,
2189*4882a593Smuzhiyun 0x11074, 0x11088,
2190*4882a593Smuzhiyun 0x11098, 0x11120,
2191*4882a593Smuzhiyun 0x1112c, 0x1117c,
2192*4882a593Smuzhiyun 0x11190, 0x112e0,
2193*4882a593Smuzhiyun 0x11300, 0x1130c,
2194*4882a593Smuzhiyun 0x12000, 0x1206c,
2195*4882a593Smuzhiyun 0x19040, 0x1906c,
2196*4882a593Smuzhiyun 0x19078, 0x19080,
2197*4882a593Smuzhiyun 0x1908c, 0x190e8,
2198*4882a593Smuzhiyun 0x190f0, 0x190f8,
2199*4882a593Smuzhiyun 0x19100, 0x19110,
2200*4882a593Smuzhiyun 0x19120, 0x19124,
2201*4882a593Smuzhiyun 0x19150, 0x19194,
2202*4882a593Smuzhiyun 0x1919c, 0x191b0,
2203*4882a593Smuzhiyun 0x191d0, 0x191e8,
2204*4882a593Smuzhiyun 0x19238, 0x19290,
2205*4882a593Smuzhiyun 0x192a4, 0x192b0,
2206*4882a593Smuzhiyun 0x192bc, 0x192bc,
2207*4882a593Smuzhiyun 0x19348, 0x1934c,
2208*4882a593Smuzhiyun 0x193f8, 0x19418,
2209*4882a593Smuzhiyun 0x19420, 0x19428,
2210*4882a593Smuzhiyun 0x19430, 0x19444,
2211*4882a593Smuzhiyun 0x1944c, 0x1946c,
2212*4882a593Smuzhiyun 0x19474, 0x19474,
2213*4882a593Smuzhiyun 0x19490, 0x194cc,
2214*4882a593Smuzhiyun 0x194f0, 0x194f8,
2215*4882a593Smuzhiyun 0x19c00, 0x19c48,
2216*4882a593Smuzhiyun 0x19c50, 0x19c80,
2217*4882a593Smuzhiyun 0x19c94, 0x19c98,
2218*4882a593Smuzhiyun 0x19ca0, 0x19cbc,
2219*4882a593Smuzhiyun 0x19ce4, 0x19ce4,
2220*4882a593Smuzhiyun 0x19cf0, 0x19cf8,
2221*4882a593Smuzhiyun 0x19d00, 0x19d28,
2222*4882a593Smuzhiyun 0x19d50, 0x19d78,
2223*4882a593Smuzhiyun 0x19d94, 0x19d98,
2224*4882a593Smuzhiyun 0x19da0, 0x19dc8,
2225*4882a593Smuzhiyun 0x19df0, 0x19e10,
2226*4882a593Smuzhiyun 0x19e50, 0x19e6c,
2227*4882a593Smuzhiyun 0x19ea0, 0x19ebc,
2228*4882a593Smuzhiyun 0x19ec4, 0x19ef4,
2229*4882a593Smuzhiyun 0x19f04, 0x19f2c,
2230*4882a593Smuzhiyun 0x19f34, 0x19f34,
2231*4882a593Smuzhiyun 0x19f40, 0x19f50,
2232*4882a593Smuzhiyun 0x19f90, 0x19fac,
2233*4882a593Smuzhiyun 0x19fc4, 0x19fc8,
2234*4882a593Smuzhiyun 0x19fd0, 0x19fe4,
2235*4882a593Smuzhiyun 0x1a000, 0x1a004,
2236*4882a593Smuzhiyun 0x1a010, 0x1a06c,
2237*4882a593Smuzhiyun 0x1a0b0, 0x1a0e4,
2238*4882a593Smuzhiyun 0x1a0ec, 0x1a0f8,
2239*4882a593Smuzhiyun 0x1a100, 0x1a108,
2240*4882a593Smuzhiyun 0x1a114, 0x1a130,
2241*4882a593Smuzhiyun 0x1a138, 0x1a1c4,
2242*4882a593Smuzhiyun 0x1a1fc, 0x1a1fc,
2243*4882a593Smuzhiyun 0x1e008, 0x1e00c,
2244*4882a593Smuzhiyun 0x1e040, 0x1e044,
2245*4882a593Smuzhiyun 0x1e04c, 0x1e04c,
2246*4882a593Smuzhiyun 0x1e284, 0x1e290,
2247*4882a593Smuzhiyun 0x1e2c0, 0x1e2c0,
2248*4882a593Smuzhiyun 0x1e2e0, 0x1e2e0,
2249*4882a593Smuzhiyun 0x1e300, 0x1e384,
2250*4882a593Smuzhiyun 0x1e3c0, 0x1e3c8,
2251*4882a593Smuzhiyun 0x1e408, 0x1e40c,
2252*4882a593Smuzhiyun 0x1e440, 0x1e444,
2253*4882a593Smuzhiyun 0x1e44c, 0x1e44c,
2254*4882a593Smuzhiyun 0x1e684, 0x1e690,
2255*4882a593Smuzhiyun 0x1e6c0, 0x1e6c0,
2256*4882a593Smuzhiyun 0x1e6e0, 0x1e6e0,
2257*4882a593Smuzhiyun 0x1e700, 0x1e784,
2258*4882a593Smuzhiyun 0x1e7c0, 0x1e7c8,
2259*4882a593Smuzhiyun 0x1e808, 0x1e80c,
2260*4882a593Smuzhiyun 0x1e840, 0x1e844,
2261*4882a593Smuzhiyun 0x1e84c, 0x1e84c,
2262*4882a593Smuzhiyun 0x1ea84, 0x1ea90,
2263*4882a593Smuzhiyun 0x1eac0, 0x1eac0,
2264*4882a593Smuzhiyun 0x1eae0, 0x1eae0,
2265*4882a593Smuzhiyun 0x1eb00, 0x1eb84,
2266*4882a593Smuzhiyun 0x1ebc0, 0x1ebc8,
2267*4882a593Smuzhiyun 0x1ec08, 0x1ec0c,
2268*4882a593Smuzhiyun 0x1ec40, 0x1ec44,
2269*4882a593Smuzhiyun 0x1ec4c, 0x1ec4c,
2270*4882a593Smuzhiyun 0x1ee84, 0x1ee90,
2271*4882a593Smuzhiyun 0x1eec0, 0x1eec0,
2272*4882a593Smuzhiyun 0x1eee0, 0x1eee0,
2273*4882a593Smuzhiyun 0x1ef00, 0x1ef84,
2274*4882a593Smuzhiyun 0x1efc0, 0x1efc8,
2275*4882a593Smuzhiyun 0x1f008, 0x1f00c,
2276*4882a593Smuzhiyun 0x1f040, 0x1f044,
2277*4882a593Smuzhiyun 0x1f04c, 0x1f04c,
2278*4882a593Smuzhiyun 0x1f284, 0x1f290,
2279*4882a593Smuzhiyun 0x1f2c0, 0x1f2c0,
2280*4882a593Smuzhiyun 0x1f2e0, 0x1f2e0,
2281*4882a593Smuzhiyun 0x1f300, 0x1f384,
2282*4882a593Smuzhiyun 0x1f3c0, 0x1f3c8,
2283*4882a593Smuzhiyun 0x1f408, 0x1f40c,
2284*4882a593Smuzhiyun 0x1f440, 0x1f444,
2285*4882a593Smuzhiyun 0x1f44c, 0x1f44c,
2286*4882a593Smuzhiyun 0x1f684, 0x1f690,
2287*4882a593Smuzhiyun 0x1f6c0, 0x1f6c0,
2288*4882a593Smuzhiyun 0x1f6e0, 0x1f6e0,
2289*4882a593Smuzhiyun 0x1f700, 0x1f784,
2290*4882a593Smuzhiyun 0x1f7c0, 0x1f7c8,
2291*4882a593Smuzhiyun 0x1f808, 0x1f80c,
2292*4882a593Smuzhiyun 0x1f840, 0x1f844,
2293*4882a593Smuzhiyun 0x1f84c, 0x1f84c,
2294*4882a593Smuzhiyun 0x1fa84, 0x1fa90,
2295*4882a593Smuzhiyun 0x1fac0, 0x1fac0,
2296*4882a593Smuzhiyun 0x1fae0, 0x1fae0,
2297*4882a593Smuzhiyun 0x1fb00, 0x1fb84,
2298*4882a593Smuzhiyun 0x1fbc0, 0x1fbc8,
2299*4882a593Smuzhiyun 0x1fc08, 0x1fc0c,
2300*4882a593Smuzhiyun 0x1fc40, 0x1fc44,
2301*4882a593Smuzhiyun 0x1fc4c, 0x1fc4c,
2302*4882a593Smuzhiyun 0x1fe84, 0x1fe90,
2303*4882a593Smuzhiyun 0x1fec0, 0x1fec0,
2304*4882a593Smuzhiyun 0x1fee0, 0x1fee0,
2305*4882a593Smuzhiyun 0x1ff00, 0x1ff84,
2306*4882a593Smuzhiyun 0x1ffc0, 0x1ffc8,
2307*4882a593Smuzhiyun 0x30000, 0x30030,
2308*4882a593Smuzhiyun 0x30100, 0x30168,
2309*4882a593Smuzhiyun 0x30190, 0x301a0,
2310*4882a593Smuzhiyun 0x301a8, 0x301b8,
2311*4882a593Smuzhiyun 0x301c4, 0x301c8,
2312*4882a593Smuzhiyun 0x301d0, 0x301d0,
2313*4882a593Smuzhiyun 0x30200, 0x30320,
2314*4882a593Smuzhiyun 0x30400, 0x304b4,
2315*4882a593Smuzhiyun 0x304c0, 0x3052c,
2316*4882a593Smuzhiyun 0x30540, 0x3061c,
2317*4882a593Smuzhiyun 0x30800, 0x308a0,
2318*4882a593Smuzhiyun 0x308c0, 0x30908,
2319*4882a593Smuzhiyun 0x30910, 0x309b8,
2320*4882a593Smuzhiyun 0x30a00, 0x30a04,
2321*4882a593Smuzhiyun 0x30a0c, 0x30a14,
2322*4882a593Smuzhiyun 0x30a1c, 0x30a2c,
2323*4882a593Smuzhiyun 0x30a44, 0x30a50,
2324*4882a593Smuzhiyun 0x30a74, 0x30a74,
2325*4882a593Smuzhiyun 0x30a7c, 0x30afc,
2326*4882a593Smuzhiyun 0x30b08, 0x30c24,
2327*4882a593Smuzhiyun 0x30d00, 0x30d14,
2328*4882a593Smuzhiyun 0x30d1c, 0x30d3c,
2329*4882a593Smuzhiyun 0x30d44, 0x30d4c,
2330*4882a593Smuzhiyun 0x30d54, 0x30d74,
2331*4882a593Smuzhiyun 0x30d7c, 0x30d7c,
2332*4882a593Smuzhiyun 0x30de0, 0x30de0,
2333*4882a593Smuzhiyun 0x30e00, 0x30ed4,
2334*4882a593Smuzhiyun 0x30f00, 0x30fa4,
2335*4882a593Smuzhiyun 0x30fc0, 0x30fc4,
2336*4882a593Smuzhiyun 0x31000, 0x31004,
2337*4882a593Smuzhiyun 0x31080, 0x310fc,
2338*4882a593Smuzhiyun 0x31208, 0x31220,
2339*4882a593Smuzhiyun 0x3123c, 0x31254,
2340*4882a593Smuzhiyun 0x31300, 0x31300,
2341*4882a593Smuzhiyun 0x31308, 0x3131c,
2342*4882a593Smuzhiyun 0x31338, 0x3133c,
2343*4882a593Smuzhiyun 0x31380, 0x31380,
2344*4882a593Smuzhiyun 0x31388, 0x313a8,
2345*4882a593Smuzhiyun 0x313b4, 0x313b4,
2346*4882a593Smuzhiyun 0x31400, 0x31420,
2347*4882a593Smuzhiyun 0x31438, 0x3143c,
2348*4882a593Smuzhiyun 0x31480, 0x31480,
2349*4882a593Smuzhiyun 0x314a8, 0x314a8,
2350*4882a593Smuzhiyun 0x314b0, 0x314b4,
2351*4882a593Smuzhiyun 0x314c8, 0x314d4,
2352*4882a593Smuzhiyun 0x31a40, 0x31a4c,
2353*4882a593Smuzhiyun 0x31af0, 0x31b20,
2354*4882a593Smuzhiyun 0x31b38, 0x31b3c,
2355*4882a593Smuzhiyun 0x31b80, 0x31b80,
2356*4882a593Smuzhiyun 0x31ba8, 0x31ba8,
2357*4882a593Smuzhiyun 0x31bb0, 0x31bb4,
2358*4882a593Smuzhiyun 0x31bc8, 0x31bd4,
2359*4882a593Smuzhiyun 0x32140, 0x3218c,
2360*4882a593Smuzhiyun 0x321f0, 0x321f4,
2361*4882a593Smuzhiyun 0x32200, 0x32200,
2362*4882a593Smuzhiyun 0x32218, 0x32218,
2363*4882a593Smuzhiyun 0x32400, 0x32400,
2364*4882a593Smuzhiyun 0x32408, 0x3241c,
2365*4882a593Smuzhiyun 0x32618, 0x32620,
2366*4882a593Smuzhiyun 0x32664, 0x32664,
2367*4882a593Smuzhiyun 0x326a8, 0x326a8,
2368*4882a593Smuzhiyun 0x326ec, 0x326ec,
2369*4882a593Smuzhiyun 0x32a00, 0x32abc,
2370*4882a593Smuzhiyun 0x32b00, 0x32b18,
2371*4882a593Smuzhiyun 0x32b20, 0x32b38,
2372*4882a593Smuzhiyun 0x32b40, 0x32b58,
2373*4882a593Smuzhiyun 0x32b60, 0x32b78,
2374*4882a593Smuzhiyun 0x32c00, 0x32c00,
2375*4882a593Smuzhiyun 0x32c08, 0x32c3c,
2376*4882a593Smuzhiyun 0x33000, 0x3302c,
2377*4882a593Smuzhiyun 0x33034, 0x33050,
2378*4882a593Smuzhiyun 0x33058, 0x33058,
2379*4882a593Smuzhiyun 0x33060, 0x3308c,
2380*4882a593Smuzhiyun 0x3309c, 0x330ac,
2381*4882a593Smuzhiyun 0x330c0, 0x330c0,
2382*4882a593Smuzhiyun 0x330c8, 0x330d0,
2383*4882a593Smuzhiyun 0x330d8, 0x330e0,
2384*4882a593Smuzhiyun 0x330ec, 0x3312c,
2385*4882a593Smuzhiyun 0x33134, 0x33150,
2386*4882a593Smuzhiyun 0x33158, 0x33158,
2387*4882a593Smuzhiyun 0x33160, 0x3318c,
2388*4882a593Smuzhiyun 0x3319c, 0x331ac,
2389*4882a593Smuzhiyun 0x331c0, 0x331c0,
2390*4882a593Smuzhiyun 0x331c8, 0x331d0,
2391*4882a593Smuzhiyun 0x331d8, 0x331e0,
2392*4882a593Smuzhiyun 0x331ec, 0x33290,
2393*4882a593Smuzhiyun 0x33298, 0x332c4,
2394*4882a593Smuzhiyun 0x332e4, 0x33390,
2395*4882a593Smuzhiyun 0x33398, 0x333c4,
2396*4882a593Smuzhiyun 0x333e4, 0x3342c,
2397*4882a593Smuzhiyun 0x33434, 0x33450,
2398*4882a593Smuzhiyun 0x33458, 0x33458,
2399*4882a593Smuzhiyun 0x33460, 0x3348c,
2400*4882a593Smuzhiyun 0x3349c, 0x334ac,
2401*4882a593Smuzhiyun 0x334c0, 0x334c0,
2402*4882a593Smuzhiyun 0x334c8, 0x334d0,
2403*4882a593Smuzhiyun 0x334d8, 0x334e0,
2404*4882a593Smuzhiyun 0x334ec, 0x3352c,
2405*4882a593Smuzhiyun 0x33534, 0x33550,
2406*4882a593Smuzhiyun 0x33558, 0x33558,
2407*4882a593Smuzhiyun 0x33560, 0x3358c,
2408*4882a593Smuzhiyun 0x3359c, 0x335ac,
2409*4882a593Smuzhiyun 0x335c0, 0x335c0,
2410*4882a593Smuzhiyun 0x335c8, 0x335d0,
2411*4882a593Smuzhiyun 0x335d8, 0x335e0,
2412*4882a593Smuzhiyun 0x335ec, 0x33690,
2413*4882a593Smuzhiyun 0x33698, 0x336c4,
2414*4882a593Smuzhiyun 0x336e4, 0x33790,
2415*4882a593Smuzhiyun 0x33798, 0x337c4,
2416*4882a593Smuzhiyun 0x337e4, 0x337fc,
2417*4882a593Smuzhiyun 0x33814, 0x33814,
2418*4882a593Smuzhiyun 0x33854, 0x33868,
2419*4882a593Smuzhiyun 0x33880, 0x3388c,
2420*4882a593Smuzhiyun 0x338c0, 0x338d0,
2421*4882a593Smuzhiyun 0x338e8, 0x338ec,
2422*4882a593Smuzhiyun 0x33900, 0x3392c,
2423*4882a593Smuzhiyun 0x33934, 0x33950,
2424*4882a593Smuzhiyun 0x33958, 0x33958,
2425*4882a593Smuzhiyun 0x33960, 0x3398c,
2426*4882a593Smuzhiyun 0x3399c, 0x339ac,
2427*4882a593Smuzhiyun 0x339c0, 0x339c0,
2428*4882a593Smuzhiyun 0x339c8, 0x339d0,
2429*4882a593Smuzhiyun 0x339d8, 0x339e0,
2430*4882a593Smuzhiyun 0x339ec, 0x33a90,
2431*4882a593Smuzhiyun 0x33a98, 0x33ac4,
2432*4882a593Smuzhiyun 0x33ae4, 0x33b10,
2433*4882a593Smuzhiyun 0x33b24, 0x33b28,
2434*4882a593Smuzhiyun 0x33b38, 0x33b50,
2435*4882a593Smuzhiyun 0x33bf0, 0x33c10,
2436*4882a593Smuzhiyun 0x33c24, 0x33c28,
2437*4882a593Smuzhiyun 0x33c38, 0x33c50,
2438*4882a593Smuzhiyun 0x33cf0, 0x33cfc,
2439*4882a593Smuzhiyun 0x34000, 0x34030,
2440*4882a593Smuzhiyun 0x34100, 0x34168,
2441*4882a593Smuzhiyun 0x34190, 0x341a0,
2442*4882a593Smuzhiyun 0x341a8, 0x341b8,
2443*4882a593Smuzhiyun 0x341c4, 0x341c8,
2444*4882a593Smuzhiyun 0x341d0, 0x341d0,
2445*4882a593Smuzhiyun 0x34200, 0x34320,
2446*4882a593Smuzhiyun 0x34400, 0x344b4,
2447*4882a593Smuzhiyun 0x344c0, 0x3452c,
2448*4882a593Smuzhiyun 0x34540, 0x3461c,
2449*4882a593Smuzhiyun 0x34800, 0x348a0,
2450*4882a593Smuzhiyun 0x348c0, 0x34908,
2451*4882a593Smuzhiyun 0x34910, 0x349b8,
2452*4882a593Smuzhiyun 0x34a00, 0x34a04,
2453*4882a593Smuzhiyun 0x34a0c, 0x34a14,
2454*4882a593Smuzhiyun 0x34a1c, 0x34a2c,
2455*4882a593Smuzhiyun 0x34a44, 0x34a50,
2456*4882a593Smuzhiyun 0x34a74, 0x34a74,
2457*4882a593Smuzhiyun 0x34a7c, 0x34afc,
2458*4882a593Smuzhiyun 0x34b08, 0x34c24,
2459*4882a593Smuzhiyun 0x34d00, 0x34d14,
2460*4882a593Smuzhiyun 0x34d1c, 0x34d3c,
2461*4882a593Smuzhiyun 0x34d44, 0x34d4c,
2462*4882a593Smuzhiyun 0x34d54, 0x34d74,
2463*4882a593Smuzhiyun 0x34d7c, 0x34d7c,
2464*4882a593Smuzhiyun 0x34de0, 0x34de0,
2465*4882a593Smuzhiyun 0x34e00, 0x34ed4,
2466*4882a593Smuzhiyun 0x34f00, 0x34fa4,
2467*4882a593Smuzhiyun 0x34fc0, 0x34fc4,
2468*4882a593Smuzhiyun 0x35000, 0x35004,
2469*4882a593Smuzhiyun 0x35080, 0x350fc,
2470*4882a593Smuzhiyun 0x35208, 0x35220,
2471*4882a593Smuzhiyun 0x3523c, 0x35254,
2472*4882a593Smuzhiyun 0x35300, 0x35300,
2473*4882a593Smuzhiyun 0x35308, 0x3531c,
2474*4882a593Smuzhiyun 0x35338, 0x3533c,
2475*4882a593Smuzhiyun 0x35380, 0x35380,
2476*4882a593Smuzhiyun 0x35388, 0x353a8,
2477*4882a593Smuzhiyun 0x353b4, 0x353b4,
2478*4882a593Smuzhiyun 0x35400, 0x35420,
2479*4882a593Smuzhiyun 0x35438, 0x3543c,
2480*4882a593Smuzhiyun 0x35480, 0x35480,
2481*4882a593Smuzhiyun 0x354a8, 0x354a8,
2482*4882a593Smuzhiyun 0x354b0, 0x354b4,
2483*4882a593Smuzhiyun 0x354c8, 0x354d4,
2484*4882a593Smuzhiyun 0x35a40, 0x35a4c,
2485*4882a593Smuzhiyun 0x35af0, 0x35b20,
2486*4882a593Smuzhiyun 0x35b38, 0x35b3c,
2487*4882a593Smuzhiyun 0x35b80, 0x35b80,
2488*4882a593Smuzhiyun 0x35ba8, 0x35ba8,
2489*4882a593Smuzhiyun 0x35bb0, 0x35bb4,
2490*4882a593Smuzhiyun 0x35bc8, 0x35bd4,
2491*4882a593Smuzhiyun 0x36140, 0x3618c,
2492*4882a593Smuzhiyun 0x361f0, 0x361f4,
2493*4882a593Smuzhiyun 0x36200, 0x36200,
2494*4882a593Smuzhiyun 0x36218, 0x36218,
2495*4882a593Smuzhiyun 0x36400, 0x36400,
2496*4882a593Smuzhiyun 0x36408, 0x3641c,
2497*4882a593Smuzhiyun 0x36618, 0x36620,
2498*4882a593Smuzhiyun 0x36664, 0x36664,
2499*4882a593Smuzhiyun 0x366a8, 0x366a8,
2500*4882a593Smuzhiyun 0x366ec, 0x366ec,
2501*4882a593Smuzhiyun 0x36a00, 0x36abc,
2502*4882a593Smuzhiyun 0x36b00, 0x36b18,
2503*4882a593Smuzhiyun 0x36b20, 0x36b38,
2504*4882a593Smuzhiyun 0x36b40, 0x36b58,
2505*4882a593Smuzhiyun 0x36b60, 0x36b78,
2506*4882a593Smuzhiyun 0x36c00, 0x36c00,
2507*4882a593Smuzhiyun 0x36c08, 0x36c3c,
2508*4882a593Smuzhiyun 0x37000, 0x3702c,
2509*4882a593Smuzhiyun 0x37034, 0x37050,
2510*4882a593Smuzhiyun 0x37058, 0x37058,
2511*4882a593Smuzhiyun 0x37060, 0x3708c,
2512*4882a593Smuzhiyun 0x3709c, 0x370ac,
2513*4882a593Smuzhiyun 0x370c0, 0x370c0,
2514*4882a593Smuzhiyun 0x370c8, 0x370d0,
2515*4882a593Smuzhiyun 0x370d8, 0x370e0,
2516*4882a593Smuzhiyun 0x370ec, 0x3712c,
2517*4882a593Smuzhiyun 0x37134, 0x37150,
2518*4882a593Smuzhiyun 0x37158, 0x37158,
2519*4882a593Smuzhiyun 0x37160, 0x3718c,
2520*4882a593Smuzhiyun 0x3719c, 0x371ac,
2521*4882a593Smuzhiyun 0x371c0, 0x371c0,
2522*4882a593Smuzhiyun 0x371c8, 0x371d0,
2523*4882a593Smuzhiyun 0x371d8, 0x371e0,
2524*4882a593Smuzhiyun 0x371ec, 0x37290,
2525*4882a593Smuzhiyun 0x37298, 0x372c4,
2526*4882a593Smuzhiyun 0x372e4, 0x37390,
2527*4882a593Smuzhiyun 0x37398, 0x373c4,
2528*4882a593Smuzhiyun 0x373e4, 0x3742c,
2529*4882a593Smuzhiyun 0x37434, 0x37450,
2530*4882a593Smuzhiyun 0x37458, 0x37458,
2531*4882a593Smuzhiyun 0x37460, 0x3748c,
2532*4882a593Smuzhiyun 0x3749c, 0x374ac,
2533*4882a593Smuzhiyun 0x374c0, 0x374c0,
2534*4882a593Smuzhiyun 0x374c8, 0x374d0,
2535*4882a593Smuzhiyun 0x374d8, 0x374e0,
2536*4882a593Smuzhiyun 0x374ec, 0x3752c,
2537*4882a593Smuzhiyun 0x37534, 0x37550,
2538*4882a593Smuzhiyun 0x37558, 0x37558,
2539*4882a593Smuzhiyun 0x37560, 0x3758c,
2540*4882a593Smuzhiyun 0x3759c, 0x375ac,
2541*4882a593Smuzhiyun 0x375c0, 0x375c0,
2542*4882a593Smuzhiyun 0x375c8, 0x375d0,
2543*4882a593Smuzhiyun 0x375d8, 0x375e0,
2544*4882a593Smuzhiyun 0x375ec, 0x37690,
2545*4882a593Smuzhiyun 0x37698, 0x376c4,
2546*4882a593Smuzhiyun 0x376e4, 0x37790,
2547*4882a593Smuzhiyun 0x37798, 0x377c4,
2548*4882a593Smuzhiyun 0x377e4, 0x377fc,
2549*4882a593Smuzhiyun 0x37814, 0x37814,
2550*4882a593Smuzhiyun 0x37854, 0x37868,
2551*4882a593Smuzhiyun 0x37880, 0x3788c,
2552*4882a593Smuzhiyun 0x378c0, 0x378d0,
2553*4882a593Smuzhiyun 0x378e8, 0x378ec,
2554*4882a593Smuzhiyun 0x37900, 0x3792c,
2555*4882a593Smuzhiyun 0x37934, 0x37950,
2556*4882a593Smuzhiyun 0x37958, 0x37958,
2557*4882a593Smuzhiyun 0x37960, 0x3798c,
2558*4882a593Smuzhiyun 0x3799c, 0x379ac,
2559*4882a593Smuzhiyun 0x379c0, 0x379c0,
2560*4882a593Smuzhiyun 0x379c8, 0x379d0,
2561*4882a593Smuzhiyun 0x379d8, 0x379e0,
2562*4882a593Smuzhiyun 0x379ec, 0x37a90,
2563*4882a593Smuzhiyun 0x37a98, 0x37ac4,
2564*4882a593Smuzhiyun 0x37ae4, 0x37b10,
2565*4882a593Smuzhiyun 0x37b24, 0x37b28,
2566*4882a593Smuzhiyun 0x37b38, 0x37b50,
2567*4882a593Smuzhiyun 0x37bf0, 0x37c10,
2568*4882a593Smuzhiyun 0x37c24, 0x37c28,
2569*4882a593Smuzhiyun 0x37c38, 0x37c50,
2570*4882a593Smuzhiyun 0x37cf0, 0x37cfc,
2571*4882a593Smuzhiyun 0x40040, 0x40040,
2572*4882a593Smuzhiyun 0x40080, 0x40084,
2573*4882a593Smuzhiyun 0x40100, 0x40100,
2574*4882a593Smuzhiyun 0x40140, 0x401bc,
2575*4882a593Smuzhiyun 0x40200, 0x40214,
2576*4882a593Smuzhiyun 0x40228, 0x40228,
2577*4882a593Smuzhiyun 0x40240, 0x40258,
2578*4882a593Smuzhiyun 0x40280, 0x40280,
2579*4882a593Smuzhiyun 0x40304, 0x40304,
2580*4882a593Smuzhiyun 0x40330, 0x4033c,
2581*4882a593Smuzhiyun 0x41304, 0x413c8,
2582*4882a593Smuzhiyun 0x413d0, 0x413dc,
2583*4882a593Smuzhiyun 0x413f0, 0x413f0,
2584*4882a593Smuzhiyun 0x41400, 0x4140c,
2585*4882a593Smuzhiyun 0x41414, 0x4141c,
2586*4882a593Smuzhiyun 0x41480, 0x414d0,
2587*4882a593Smuzhiyun 0x44000, 0x4407c,
2588*4882a593Smuzhiyun 0x440c0, 0x441ac,
2589*4882a593Smuzhiyun 0x441b4, 0x4427c,
2590*4882a593Smuzhiyun 0x442c0, 0x443ac,
2591*4882a593Smuzhiyun 0x443b4, 0x4447c,
2592*4882a593Smuzhiyun 0x444c0, 0x445ac,
2593*4882a593Smuzhiyun 0x445b4, 0x4467c,
2594*4882a593Smuzhiyun 0x446c0, 0x447ac,
2595*4882a593Smuzhiyun 0x447b4, 0x4487c,
2596*4882a593Smuzhiyun 0x448c0, 0x449ac,
2597*4882a593Smuzhiyun 0x449b4, 0x44a7c,
2598*4882a593Smuzhiyun 0x44ac0, 0x44bac,
2599*4882a593Smuzhiyun 0x44bb4, 0x44c7c,
2600*4882a593Smuzhiyun 0x44cc0, 0x44dac,
2601*4882a593Smuzhiyun 0x44db4, 0x44e7c,
2602*4882a593Smuzhiyun 0x44ec0, 0x44fac,
2603*4882a593Smuzhiyun 0x44fb4, 0x4507c,
2604*4882a593Smuzhiyun 0x450c0, 0x451ac,
2605*4882a593Smuzhiyun 0x451b4, 0x451fc,
2606*4882a593Smuzhiyun 0x45800, 0x45804,
2607*4882a593Smuzhiyun 0x45810, 0x45830,
2608*4882a593Smuzhiyun 0x45840, 0x45860,
2609*4882a593Smuzhiyun 0x45868, 0x45868,
2610*4882a593Smuzhiyun 0x45880, 0x45884,
2611*4882a593Smuzhiyun 0x458a0, 0x458b0,
2612*4882a593Smuzhiyun 0x45a00, 0x45a04,
2613*4882a593Smuzhiyun 0x45a10, 0x45a30,
2614*4882a593Smuzhiyun 0x45a40, 0x45a60,
2615*4882a593Smuzhiyun 0x45a68, 0x45a68,
2616*4882a593Smuzhiyun 0x45a80, 0x45a84,
2617*4882a593Smuzhiyun 0x45aa0, 0x45ab0,
2618*4882a593Smuzhiyun 0x460c0, 0x460e4,
2619*4882a593Smuzhiyun 0x47000, 0x4703c,
2620*4882a593Smuzhiyun 0x47044, 0x4708c,
2621*4882a593Smuzhiyun 0x47200, 0x47250,
2622*4882a593Smuzhiyun 0x47400, 0x47408,
2623*4882a593Smuzhiyun 0x47414, 0x47420,
2624*4882a593Smuzhiyun 0x47600, 0x47618,
2625*4882a593Smuzhiyun 0x47800, 0x47814,
2626*4882a593Smuzhiyun 0x47820, 0x4782c,
2627*4882a593Smuzhiyun 0x50000, 0x50084,
2628*4882a593Smuzhiyun 0x50090, 0x500cc,
2629*4882a593Smuzhiyun 0x50300, 0x50384,
2630*4882a593Smuzhiyun 0x50400, 0x50400,
2631*4882a593Smuzhiyun 0x50800, 0x50884,
2632*4882a593Smuzhiyun 0x50890, 0x508cc,
2633*4882a593Smuzhiyun 0x50b00, 0x50b84,
2634*4882a593Smuzhiyun 0x50c00, 0x50c00,
2635*4882a593Smuzhiyun 0x51000, 0x51020,
2636*4882a593Smuzhiyun 0x51028, 0x510b0,
2637*4882a593Smuzhiyun 0x51300, 0x51324,
2638*4882a593Smuzhiyun };
2639*4882a593Smuzhiyun
2640*4882a593Smuzhiyun u32 *buf_end = (u32 *)((char *)buf + buf_size);
2641*4882a593Smuzhiyun const unsigned int *reg_ranges;
2642*4882a593Smuzhiyun int reg_ranges_size, range;
2643*4882a593Smuzhiyun unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2644*4882a593Smuzhiyun
2645*4882a593Smuzhiyun /* Select the right set of register ranges to dump depending on the
2646*4882a593Smuzhiyun * adapter chip type.
2647*4882a593Smuzhiyun */
2648*4882a593Smuzhiyun switch (chip_version) {
2649*4882a593Smuzhiyun case CHELSIO_T4:
2650*4882a593Smuzhiyun reg_ranges = t4_reg_ranges;
2651*4882a593Smuzhiyun reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
2652*4882a593Smuzhiyun break;
2653*4882a593Smuzhiyun
2654*4882a593Smuzhiyun case CHELSIO_T5:
2655*4882a593Smuzhiyun reg_ranges = t5_reg_ranges;
2656*4882a593Smuzhiyun reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
2657*4882a593Smuzhiyun break;
2658*4882a593Smuzhiyun
2659*4882a593Smuzhiyun case CHELSIO_T6:
2660*4882a593Smuzhiyun reg_ranges = t6_reg_ranges;
2661*4882a593Smuzhiyun reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
2662*4882a593Smuzhiyun break;
2663*4882a593Smuzhiyun
2664*4882a593Smuzhiyun default:
2665*4882a593Smuzhiyun dev_err(adap->pdev_dev,
2666*4882a593Smuzhiyun "Unsupported chip version %d\n", chip_version);
2667*4882a593Smuzhiyun return;
2668*4882a593Smuzhiyun }
2669*4882a593Smuzhiyun
2670*4882a593Smuzhiyun /* Clear the register buffer and insert the appropriate register
2671*4882a593Smuzhiyun * values selected by the above register ranges.
2672*4882a593Smuzhiyun */
2673*4882a593Smuzhiyun memset(buf, 0, buf_size);
2674*4882a593Smuzhiyun for (range = 0; range < reg_ranges_size; range += 2) {
2675*4882a593Smuzhiyun unsigned int reg = reg_ranges[range];
2676*4882a593Smuzhiyun unsigned int last_reg = reg_ranges[range + 1];
2677*4882a593Smuzhiyun u32 *bufp = (u32 *)((char *)buf + reg);
2678*4882a593Smuzhiyun
2679*4882a593Smuzhiyun /* Iterate across the register range filling in the register
2680*4882a593Smuzhiyun * buffer but don't write past the end of the register buffer.
2681*4882a593Smuzhiyun */
2682*4882a593Smuzhiyun while (reg <= last_reg && bufp < buf_end) {
2683*4882a593Smuzhiyun *bufp++ = t4_read_reg(adap, reg);
2684*4882a593Smuzhiyun reg += sizeof(u32);
2685*4882a593Smuzhiyun }
2686*4882a593Smuzhiyun }
2687*4882a593Smuzhiyun }
2688*4882a593Smuzhiyun
2689*4882a593Smuzhiyun #define EEPROM_STAT_ADDR 0x7bfc
2690*4882a593Smuzhiyun #define VPD_BASE 0x400
2691*4882a593Smuzhiyun #define VPD_BASE_OLD 0
2692*4882a593Smuzhiyun #define VPD_LEN 1024
2693*4882a593Smuzhiyun #define CHELSIO_VPD_UNIQUE_ID 0x82
2694*4882a593Smuzhiyun
2695*4882a593Smuzhiyun /**
2696*4882a593Smuzhiyun * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2697*4882a593Smuzhiyun * @phys_addr: the physical EEPROM address
2698*4882a593Smuzhiyun * @fn: the PCI function number
2699*4882a593Smuzhiyun * @sz: size of function-specific area
2700*4882a593Smuzhiyun *
2701*4882a593Smuzhiyun * Translate a physical EEPROM address to virtual. The first 1K is
2702*4882a593Smuzhiyun * accessed through virtual addresses starting at 31K, the rest is
2703*4882a593Smuzhiyun * accessed through virtual addresses starting at 0.
2704*4882a593Smuzhiyun *
2705*4882a593Smuzhiyun * The mapping is as follows:
2706*4882a593Smuzhiyun * [0..1K) -> [31K..32K)
2707*4882a593Smuzhiyun * [1K..1K+A) -> [31K-A..31K)
2708*4882a593Smuzhiyun * [1K+A..ES) -> [0..ES-A-1K)
2709*4882a593Smuzhiyun *
2710*4882a593Smuzhiyun * where A = @fn * @sz, and ES = EEPROM size.
2711*4882a593Smuzhiyun */
t4_eeprom_ptov(unsigned int phys_addr,unsigned int fn,unsigned int sz)2712*4882a593Smuzhiyun int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2713*4882a593Smuzhiyun {
2714*4882a593Smuzhiyun fn *= sz;
2715*4882a593Smuzhiyun if (phys_addr < 1024)
2716*4882a593Smuzhiyun return phys_addr + (31 << 10);
2717*4882a593Smuzhiyun if (phys_addr < 1024 + fn)
2718*4882a593Smuzhiyun return 31744 - fn + phys_addr - 1024;
2719*4882a593Smuzhiyun if (phys_addr < EEPROMSIZE)
2720*4882a593Smuzhiyun return phys_addr - 1024 - fn;
2721*4882a593Smuzhiyun return -EINVAL;
2722*4882a593Smuzhiyun }
2723*4882a593Smuzhiyun
2724*4882a593Smuzhiyun /**
2725*4882a593Smuzhiyun * t4_seeprom_wp - enable/disable EEPROM write protection
2726*4882a593Smuzhiyun * @adapter: the adapter
2727*4882a593Smuzhiyun * @enable: whether to enable or disable write protection
2728*4882a593Smuzhiyun *
2729*4882a593Smuzhiyun * Enables or disables write protection on the serial EEPROM.
2730*4882a593Smuzhiyun */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
	/* 0xc is the status-word value written to engage write protection;
	 * 0 clears it.
	 */
	unsigned int stat = 0;
	int ret;

	if (enable)
		stat = 0xc;

	ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &stat);
	return ret < 0 ? ret : 0;
}
2737*4882a593Smuzhiyun
2738*4882a593Smuzhiyun /**
2739*4882a593Smuzhiyun * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
2740*4882a593Smuzhiyun * @adapter: adapter to read
2741*4882a593Smuzhiyun * @p: where to store the parameters
2742*4882a593Smuzhiyun *
2743*4882a593Smuzhiyun * Reads card parameters stored in VPD EEPROM.
2744*4882a593Smuzhiyun */
t4_get_raw_vpd_params(struct adapter * adapter,struct vpd_params * p)2745*4882a593Smuzhiyun int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
2746*4882a593Smuzhiyun {
2747*4882a593Smuzhiyun int i, ret = 0, addr;
2748*4882a593Smuzhiyun int ec, sn, pn, na;
2749*4882a593Smuzhiyun u8 *vpd, csum;
2750*4882a593Smuzhiyun unsigned int vpdr_len, kw_offset, id_len;
2751*4882a593Smuzhiyun
2752*4882a593Smuzhiyun vpd = vmalloc(VPD_LEN);
2753*4882a593Smuzhiyun if (!vpd)
2754*4882a593Smuzhiyun return -ENOMEM;
2755*4882a593Smuzhiyun
2756*4882a593Smuzhiyun /* Card information normally starts at VPD_BASE but early cards had
2757*4882a593Smuzhiyun * it at 0.
2758*4882a593Smuzhiyun */
2759*4882a593Smuzhiyun ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
2760*4882a593Smuzhiyun if (ret < 0)
2761*4882a593Smuzhiyun goto out;
2762*4882a593Smuzhiyun
2763*4882a593Smuzhiyun /* The VPD shall have a unique identifier specified by the PCI SIG.
2764*4882a593Smuzhiyun * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
2765*4882a593Smuzhiyun * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
2766*4882a593Smuzhiyun * is expected to automatically put this entry at the
2767*4882a593Smuzhiyun * beginning of the VPD.
2768*4882a593Smuzhiyun */
2769*4882a593Smuzhiyun addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
2770*4882a593Smuzhiyun
2771*4882a593Smuzhiyun ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
2772*4882a593Smuzhiyun if (ret < 0)
2773*4882a593Smuzhiyun goto out;
2774*4882a593Smuzhiyun
2775*4882a593Smuzhiyun if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
2776*4882a593Smuzhiyun dev_err(adapter->pdev_dev, "missing VPD ID string\n");
2777*4882a593Smuzhiyun ret = -EINVAL;
2778*4882a593Smuzhiyun goto out;
2779*4882a593Smuzhiyun }
2780*4882a593Smuzhiyun
2781*4882a593Smuzhiyun id_len = pci_vpd_lrdt_size(vpd);
2782*4882a593Smuzhiyun if (id_len > ID_LEN)
2783*4882a593Smuzhiyun id_len = ID_LEN;
2784*4882a593Smuzhiyun
2785*4882a593Smuzhiyun i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
2786*4882a593Smuzhiyun if (i < 0) {
2787*4882a593Smuzhiyun dev_err(adapter->pdev_dev, "missing VPD-R section\n");
2788*4882a593Smuzhiyun ret = -EINVAL;
2789*4882a593Smuzhiyun goto out;
2790*4882a593Smuzhiyun }
2791*4882a593Smuzhiyun
2792*4882a593Smuzhiyun vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
2793*4882a593Smuzhiyun kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
2794*4882a593Smuzhiyun if (vpdr_len + kw_offset > VPD_LEN) {
2795*4882a593Smuzhiyun dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
2796*4882a593Smuzhiyun ret = -EINVAL;
2797*4882a593Smuzhiyun goto out;
2798*4882a593Smuzhiyun }
2799*4882a593Smuzhiyun
2800*4882a593Smuzhiyun #define FIND_VPD_KW(var, name) do { \
2801*4882a593Smuzhiyun var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
2802*4882a593Smuzhiyun if (var < 0) { \
2803*4882a593Smuzhiyun dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
2804*4882a593Smuzhiyun ret = -EINVAL; \
2805*4882a593Smuzhiyun goto out; \
2806*4882a593Smuzhiyun } \
2807*4882a593Smuzhiyun var += PCI_VPD_INFO_FLD_HDR_SIZE; \
2808*4882a593Smuzhiyun } while (0)
2809*4882a593Smuzhiyun
2810*4882a593Smuzhiyun FIND_VPD_KW(i, "RV");
2811*4882a593Smuzhiyun for (csum = 0; i >= 0; i--)
2812*4882a593Smuzhiyun csum += vpd[i];
2813*4882a593Smuzhiyun
2814*4882a593Smuzhiyun if (csum) {
2815*4882a593Smuzhiyun dev_err(adapter->pdev_dev,
2816*4882a593Smuzhiyun "corrupted VPD EEPROM, actual csum %u\n", csum);
2817*4882a593Smuzhiyun ret = -EINVAL;
2818*4882a593Smuzhiyun goto out;
2819*4882a593Smuzhiyun }
2820*4882a593Smuzhiyun
2821*4882a593Smuzhiyun FIND_VPD_KW(ec, "EC");
2822*4882a593Smuzhiyun FIND_VPD_KW(sn, "SN");
2823*4882a593Smuzhiyun FIND_VPD_KW(pn, "PN");
2824*4882a593Smuzhiyun FIND_VPD_KW(na, "NA");
2825*4882a593Smuzhiyun #undef FIND_VPD_KW
2826*4882a593Smuzhiyun
2827*4882a593Smuzhiyun memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
2828*4882a593Smuzhiyun strim(p->id);
2829*4882a593Smuzhiyun memcpy(p->ec, vpd + ec, EC_LEN);
2830*4882a593Smuzhiyun strim(p->ec);
2831*4882a593Smuzhiyun i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
2832*4882a593Smuzhiyun memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
2833*4882a593Smuzhiyun strim(p->sn);
2834*4882a593Smuzhiyun i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
2835*4882a593Smuzhiyun memcpy(p->pn, vpd + pn, min(i, PN_LEN));
2836*4882a593Smuzhiyun strim(p->pn);
2837*4882a593Smuzhiyun memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
2838*4882a593Smuzhiyun strim((char *)p->na);
2839*4882a593Smuzhiyun
2840*4882a593Smuzhiyun out:
2841*4882a593Smuzhiyun vfree(vpd);
2842*4882a593Smuzhiyun return ret < 0 ? ret : 0;
2843*4882a593Smuzhiyun }
2844*4882a593Smuzhiyun
2845*4882a593Smuzhiyun /**
2846*4882a593Smuzhiyun * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
2847*4882a593Smuzhiyun * @adapter: adapter to read
2848*4882a593Smuzhiyun * @p: where to store the parameters
2849*4882a593Smuzhiyun *
2850*4882a593Smuzhiyun * Reads card parameters stored in VPD EEPROM and retrieves the Core
2851*4882a593Smuzhiyun * Clock. This can only be called after a connection to the firmware
2852*4882a593Smuzhiyun * is established.
2853*4882a593Smuzhiyun */
t4_get_vpd_params(struct adapter * adapter,struct vpd_params * p)2854*4882a593Smuzhiyun int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
2855*4882a593Smuzhiyun {
2856*4882a593Smuzhiyun u32 cclk_param, cclk_val;
2857*4882a593Smuzhiyun int ret;
2858*4882a593Smuzhiyun
2859*4882a593Smuzhiyun /* Grab the raw VPD parameters.
2860*4882a593Smuzhiyun */
2861*4882a593Smuzhiyun ret = t4_get_raw_vpd_params(adapter, p);
2862*4882a593Smuzhiyun if (ret)
2863*4882a593Smuzhiyun return ret;
2864*4882a593Smuzhiyun
2865*4882a593Smuzhiyun /* Ask firmware for the Core Clock since it knows how to translate the
2866*4882a593Smuzhiyun * Reference Clock ('V2') VPD field into a Core Clock value ...
2867*4882a593Smuzhiyun */
2868*4882a593Smuzhiyun cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2869*4882a593Smuzhiyun FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
2870*4882a593Smuzhiyun ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2871*4882a593Smuzhiyun 1, &cclk_param, &cclk_val);
2872*4882a593Smuzhiyun
2873*4882a593Smuzhiyun if (ret)
2874*4882a593Smuzhiyun return ret;
2875*4882a593Smuzhiyun p->cclk = cclk_val;
2876*4882a593Smuzhiyun
2877*4882a593Smuzhiyun return 0;
2878*4882a593Smuzhiyun }
2879*4882a593Smuzhiyun
/**
 * t4_get_pfres - retrieve PF resource limits
 * @adapter: the adapter
 *
 * Retrieves configured resource limits and capabilities for the
 * physical function we're attached to. The results are stored in
 * @adapter->params.pfres. Returns 0 on success, a negative error
 * number otherwise.
 */
int t4_get_pfres(struct adapter *adapter)
{
	struct pf_resources *pfres = &adapter->params.pfres;
	struct fw_pfvf_cmd cmd, rpl;
	int v;
	u32 word;

	/* Execute PFVF Read command to get PF resource limits; bail out early
	 * with error on command failure.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_READ_F |
				    FW_PFVF_CMD_PFN_V(adapter->pf) |
				    FW_PFVF_CMD_VFN_V(0));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
	v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
	if (v != FW_SUCCESS)
		return v;

	/* Extract PF resource limits from the reply's packed bit-fields
	 * and return success.
	 */
	word = be32_to_cpu(rpl.niqflint_niq);
	pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
	pfres->niq = FW_PFVF_CMD_NIQ_G(word);

	word = be32_to_cpu(rpl.type_to_neq);
	pfres->neq = FW_PFVF_CMD_NEQ_G(word);
	pfres->pmask = FW_PFVF_CMD_PMASK_G(word);

	word = be32_to_cpu(rpl.tc_to_nexactf);
	pfres->tc = FW_PFVF_CMD_TC_G(word);
	pfres->nvi = FW_PFVF_CMD_NVI_G(word);
	pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);

	word = be32_to_cpu(rpl.r_caps_to_nethctrl);
	pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
	pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
	pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);

	return 0;
}
2930*4882a593Smuzhiyun
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes (classic SPI flash command set) */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};
2944*4882a593Smuzhiyun
2945*4882a593Smuzhiyun /**
2946*4882a593Smuzhiyun * sf1_read - read data from the serial flash
2947*4882a593Smuzhiyun * @adapter: the adapter
2948*4882a593Smuzhiyun * @byte_cnt: number of bytes to read
2949*4882a593Smuzhiyun * @cont: whether another operation will be chained
2950*4882a593Smuzhiyun * @lock: whether to lock SF for PL access only
2951*4882a593Smuzhiyun * @valp: where to store the read data
2952*4882a593Smuzhiyun *
2953*4882a593Smuzhiyun * Reads up to 4 bytes of data from the serial flash. The location of
2954*4882a593Smuzhiyun * the read needs to be specified prior to calling this by issuing the
2955*4882a593Smuzhiyun * appropriate commands to the serial flash.
2956*4882a593Smuzhiyun */
sf1_read(struct adapter * adapter,unsigned int byte_cnt,int cont,int lock,u32 * valp)2957*4882a593Smuzhiyun static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2958*4882a593Smuzhiyun int lock, u32 *valp)
2959*4882a593Smuzhiyun {
2960*4882a593Smuzhiyun int ret;
2961*4882a593Smuzhiyun
2962*4882a593Smuzhiyun if (!byte_cnt || byte_cnt > 4)
2963*4882a593Smuzhiyun return -EINVAL;
2964*4882a593Smuzhiyun if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
2965*4882a593Smuzhiyun return -EBUSY;
2966*4882a593Smuzhiyun t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2967*4882a593Smuzhiyun SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
2968*4882a593Smuzhiyun ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2969*4882a593Smuzhiyun if (!ret)
2970*4882a593Smuzhiyun *valp = t4_read_reg(adapter, SF_DATA_A);
2971*4882a593Smuzhiyun return ret;
2972*4882a593Smuzhiyun }
2973*4882a593Smuzhiyun
2974*4882a593Smuzhiyun /**
2975*4882a593Smuzhiyun * sf1_write - write data to the serial flash
2976*4882a593Smuzhiyun * @adapter: the adapter
2977*4882a593Smuzhiyun * @byte_cnt: number of bytes to write
2978*4882a593Smuzhiyun * @cont: whether another operation will be chained
2979*4882a593Smuzhiyun * @lock: whether to lock SF for PL access only
2980*4882a593Smuzhiyun * @val: value to write
2981*4882a593Smuzhiyun *
2982*4882a593Smuzhiyun * Writes up to 4 bytes of data to the serial flash. The location of
2983*4882a593Smuzhiyun * the write needs to be specified prior to calling this by issuing the
2984*4882a593Smuzhiyun * appropriate commands to the serial flash.
2985*4882a593Smuzhiyun */
sf1_write(struct adapter * adapter,unsigned int byte_cnt,int cont,int lock,u32 val)2986*4882a593Smuzhiyun static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
2987*4882a593Smuzhiyun int lock, u32 val)
2988*4882a593Smuzhiyun {
2989*4882a593Smuzhiyun if (!byte_cnt || byte_cnt > 4)
2990*4882a593Smuzhiyun return -EINVAL;
2991*4882a593Smuzhiyun if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
2992*4882a593Smuzhiyun return -EBUSY;
2993*4882a593Smuzhiyun t4_write_reg(adapter, SF_DATA_A, val);
2994*4882a593Smuzhiyun t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2995*4882a593Smuzhiyun SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
2996*4882a593Smuzhiyun return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2997*4882a593Smuzhiyun }
2998*4882a593Smuzhiyun
2999*4882a593Smuzhiyun /**
3000*4882a593Smuzhiyun * flash_wait_op - wait for a flash operation to complete
3001*4882a593Smuzhiyun * @adapter: the adapter
3002*4882a593Smuzhiyun * @attempts: max number of polls of the status register
3003*4882a593Smuzhiyun * @delay: delay between polls in ms
3004*4882a593Smuzhiyun *
3005*4882a593Smuzhiyun * Wait for a flash operation to complete by polling the status register.
3006*4882a593Smuzhiyun */
flash_wait_op(struct adapter * adapter,int attempts,int delay)3007*4882a593Smuzhiyun static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
3008*4882a593Smuzhiyun {
3009*4882a593Smuzhiyun int ret;
3010*4882a593Smuzhiyun u32 status;
3011*4882a593Smuzhiyun
3012*4882a593Smuzhiyun while (1) {
3013*4882a593Smuzhiyun if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3014*4882a593Smuzhiyun (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3015*4882a593Smuzhiyun return ret;
3016*4882a593Smuzhiyun if (!(status & 1))
3017*4882a593Smuzhiyun return 0;
3018*4882a593Smuzhiyun if (--attempts == 0)
3019*4882a593Smuzhiyun return -EAGAIN;
3020*4882a593Smuzhiyun if (delay)
3021*4882a593Smuzhiyun msleep(delay);
3022*4882a593Smuzhiyun }
3023*4882a593Smuzhiyun }
3024*4882a593Smuzhiyun
/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	/* The read must lie entirely within the flash and be 32-bit
	 * aligned.
	 */
	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/* Build the FAST_READ command word: opcode in the low byte,
	 * byte-swapped address in the remaining bytes.
	 */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	/* Send the command, then clock in the dummy byte FAST_READ
	 * requires before real data appears.
	 */
	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* Keep the transfer chained except on the last word; the
		 * last read also releases the SF lock.
		 */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32)(cpu_to_be32(*data));
	}
	return 0;
}
3063*4882a593Smuzhiyun
/**
 * t4_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write in bytes
 * @data: the data to write
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address. All the data must be written to the same page.
 * If @byte_oriented is set the write data is stored as byte stream
 * (i.e. matches what on disk), otherwise in big-endian.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data, bool byte_oriented)
{
	unsigned int i, c, left, val, offset = addr & 0xff;
	u32 buf[64];
	int ret;

	/* The write must start inside the flash and must not cross a
	 * 256-byte page boundary.
	 */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* PAGE_PROGRAM command word: opcode in the low byte, byte-swapped
	 * address in the rest.
	 */
	val = swab32(addr) | SF_PROG_PAGE;

	/* Enable writes, then issue the program command with the SF lock
	 * held for the duration of the chained transfer.
	 */
	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Shift the payload out up to 4 bytes at a time, packing the bytes
	 * into a word in the order selected by @byte_oriented.  Note that
	 * @data is advanced here, so afterwards it points one past the
	 * written bytes (relied on by the verification memcmp below).
	 */
	for (left = n; left; left -= c, data += c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i) {
			if (byte_oriented)
				val = (val << 8) + data[i];
			else
				val = (val << 8) + data[c - i - 1];
		}

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
			    byte_oriented);
	if (ret)
		return ret;

	/* @data was advanced past the payload above, so data - n is the
	 * start of the original buffer.
	 */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP_A, 0);    /* unlock SF */
	return ret;
}
3130*4882a593Smuzhiyun
3131*4882a593Smuzhiyun /**
3132*4882a593Smuzhiyun * t4_get_fw_version - read the firmware version
3133*4882a593Smuzhiyun * @adapter: the adapter
3134*4882a593Smuzhiyun * @vers: where to place the version
3135*4882a593Smuzhiyun *
3136*4882a593Smuzhiyun * Reads the FW version from flash.
3137*4882a593Smuzhiyun */
t4_get_fw_version(struct adapter * adapter,u32 * vers)3138*4882a593Smuzhiyun int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3139*4882a593Smuzhiyun {
3140*4882a593Smuzhiyun return t4_read_flash(adapter, FLASH_FW_START +
3141*4882a593Smuzhiyun offsetof(struct fw_hdr, fw_ver), 1,
3142*4882a593Smuzhiyun vers, 0);
3143*4882a593Smuzhiyun }
3144*4882a593Smuzhiyun
3145*4882a593Smuzhiyun /**
3146*4882a593Smuzhiyun * t4_get_bs_version - read the firmware bootstrap version
3147*4882a593Smuzhiyun * @adapter: the adapter
3148*4882a593Smuzhiyun * @vers: where to place the version
3149*4882a593Smuzhiyun *
3150*4882a593Smuzhiyun * Reads the FW Bootstrap version from flash.
3151*4882a593Smuzhiyun */
t4_get_bs_version(struct adapter * adapter,u32 * vers)3152*4882a593Smuzhiyun int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3153*4882a593Smuzhiyun {
3154*4882a593Smuzhiyun return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3155*4882a593Smuzhiyun offsetof(struct fw_hdr, fw_ver), 1,
3156*4882a593Smuzhiyun vers, 0);
3157*4882a593Smuzhiyun }
3158*4882a593Smuzhiyun
3159*4882a593Smuzhiyun /**
3160*4882a593Smuzhiyun * t4_get_tp_version - read the TP microcode version
3161*4882a593Smuzhiyun * @adapter: the adapter
3162*4882a593Smuzhiyun * @vers: where to place the version
3163*4882a593Smuzhiyun *
3164*4882a593Smuzhiyun * Reads the TP microcode version from flash.
3165*4882a593Smuzhiyun */
t4_get_tp_version(struct adapter * adapter,u32 * vers)3166*4882a593Smuzhiyun int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3167*4882a593Smuzhiyun {
3168*4882a593Smuzhiyun return t4_read_flash(adapter, FLASH_FW_START +
3169*4882a593Smuzhiyun offsetof(struct fw_hdr, tp_microcode_ver),
3170*4882a593Smuzhiyun 1, vers, 0);
3171*4882a593Smuzhiyun }
3172*4882a593Smuzhiyun
3173*4882a593Smuzhiyun /**
3174*4882a593Smuzhiyun * t4_get_exprom_version - return the Expansion ROM version (if any)
3175*4882a593Smuzhiyun * @adap: the adapter
3176*4882a593Smuzhiyun * @vers: where to place the version
3177*4882a593Smuzhiyun *
3178*4882a593Smuzhiyun * Reads the Expansion ROM header from FLASH and returns the version
3179*4882a593Smuzhiyun * number (if present) through the @vers return value pointer. We return
3180*4882a593Smuzhiyun * this in the Firmware Version Format since it's convenient. Return
3181*4882a593Smuzhiyun * 0 on success, -ENOENT if no Expansion ROM is present.
3182*4882a593Smuzhiyun */
t4_get_exprom_version(struct adapter * adap,u32 * vers)3183*4882a593Smuzhiyun int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3184*4882a593Smuzhiyun {
3185*4882a593Smuzhiyun struct exprom_header {
3186*4882a593Smuzhiyun unsigned char hdr_arr[16]; /* must start with 0x55aa */
3187*4882a593Smuzhiyun unsigned char hdr_ver[4]; /* Expansion ROM version */
3188*4882a593Smuzhiyun } *hdr;
3189*4882a593Smuzhiyun u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3190*4882a593Smuzhiyun sizeof(u32))];
3191*4882a593Smuzhiyun int ret;
3192*4882a593Smuzhiyun
3193*4882a593Smuzhiyun ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3194*4882a593Smuzhiyun ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3195*4882a593Smuzhiyun 0);
3196*4882a593Smuzhiyun if (ret)
3197*4882a593Smuzhiyun return ret;
3198*4882a593Smuzhiyun
3199*4882a593Smuzhiyun hdr = (struct exprom_header *)exprom_header_buf;
3200*4882a593Smuzhiyun if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3201*4882a593Smuzhiyun return -ENOENT;
3202*4882a593Smuzhiyun
3203*4882a593Smuzhiyun *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
3204*4882a593Smuzhiyun FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
3205*4882a593Smuzhiyun FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
3206*4882a593Smuzhiyun FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
3207*4882a593Smuzhiyun return 0;
3208*4882a593Smuzhiyun }
3209*4882a593Smuzhiyun
3210*4882a593Smuzhiyun /**
3211*4882a593Smuzhiyun * t4_get_vpd_version - return the VPD version
3212*4882a593Smuzhiyun * @adapter: the adapter
3213*4882a593Smuzhiyun * @vers: where to place the version
3214*4882a593Smuzhiyun *
3215*4882a593Smuzhiyun * Reads the VPD via the Firmware interface (thus this can only be called
3216*4882a593Smuzhiyun * once we're ready to issue Firmware commands). The format of the
3217*4882a593Smuzhiyun * VPD version is adapter specific. Returns 0 on success, an error on
3218*4882a593Smuzhiyun * failure.
3219*4882a593Smuzhiyun *
3220*4882a593Smuzhiyun * Note that early versions of the Firmware didn't include the ability
3221*4882a593Smuzhiyun * to retrieve the VPD version, so we zero-out the return-value parameter
3222*4882a593Smuzhiyun * in that case to avoid leaving it with garbage in it.
3223*4882a593Smuzhiyun *
3224*4882a593Smuzhiyun * Also note that the Firmware will return its cached copy of the VPD
3225*4882a593Smuzhiyun * Revision ID, not the actual Revision ID as written in the Serial
3226*4882a593Smuzhiyun * EEPROM. This is only an issue if a new VPD has been written and the
3227*4882a593Smuzhiyun * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3228*4882a593Smuzhiyun * to defer calling this routine till after a FW_RESET_CMD has been issued
3229*4882a593Smuzhiyun * if the Host Driver will be performing a full adapter initialization.
3230*4882a593Smuzhiyun */
t4_get_vpd_version(struct adapter * adapter,u32 * vers)3231*4882a593Smuzhiyun int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3232*4882a593Smuzhiyun {
3233*4882a593Smuzhiyun u32 vpdrev_param;
3234*4882a593Smuzhiyun int ret;
3235*4882a593Smuzhiyun
3236*4882a593Smuzhiyun vpdrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3237*4882a593Smuzhiyun FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_VPDREV));
3238*4882a593Smuzhiyun ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3239*4882a593Smuzhiyun 1, &vpdrev_param, vers);
3240*4882a593Smuzhiyun if (ret)
3241*4882a593Smuzhiyun *vers = 0;
3242*4882a593Smuzhiyun return ret;
3243*4882a593Smuzhiyun }
3244*4882a593Smuzhiyun
3245*4882a593Smuzhiyun /**
3246*4882a593Smuzhiyun * t4_get_scfg_version - return the Serial Configuration version
3247*4882a593Smuzhiyun * @adapter: the adapter
3248*4882a593Smuzhiyun * @vers: where to place the version
3249*4882a593Smuzhiyun *
3250*4882a593Smuzhiyun * Reads the Serial Configuration Version via the Firmware interface
3251*4882a593Smuzhiyun * (thus this can only be called once we're ready to issue Firmware
3252*4882a593Smuzhiyun * commands). The format of the Serial Configuration version is
3253*4882a593Smuzhiyun * adapter specific. Returns 0 on success, an error on failure.
3254*4882a593Smuzhiyun *
3255*4882a593Smuzhiyun * Note that early versions of the Firmware didn't include the ability
3256*4882a593Smuzhiyun * to retrieve the Serial Configuration version, so we zero-out the
3257*4882a593Smuzhiyun * return-value parameter in that case to avoid leaving it with
3258*4882a593Smuzhiyun * garbage in it.
3259*4882a593Smuzhiyun *
3260*4882a593Smuzhiyun * Also note that the Firmware will return its cached copy of the Serial
3261*4882a593Smuzhiyun * Initialization Revision ID, not the actual Revision ID as written in
3262*4882a593Smuzhiyun * the Serial EEPROM. This is only an issue if a new VPD has been written
3263*4882a593Smuzhiyun * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3264*4882a593Smuzhiyun * it's best to defer calling this routine till after a FW_RESET_CMD has
3265*4882a593Smuzhiyun * been issued if the Host Driver will be performing a full adapter
3266*4882a593Smuzhiyun * initialization.
3267*4882a593Smuzhiyun */
t4_get_scfg_version(struct adapter * adapter,u32 * vers)3268*4882a593Smuzhiyun int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3269*4882a593Smuzhiyun {
3270*4882a593Smuzhiyun u32 scfgrev_param;
3271*4882a593Smuzhiyun int ret;
3272*4882a593Smuzhiyun
3273*4882a593Smuzhiyun scfgrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3274*4882a593Smuzhiyun FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_SCFGREV));
3275*4882a593Smuzhiyun ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3276*4882a593Smuzhiyun 1, &scfgrev_param, vers);
3277*4882a593Smuzhiyun if (ret)
3278*4882a593Smuzhiyun *vers = 0;
3279*4882a593Smuzhiyun return ret;
3280*4882a593Smuzhiyun }
3281*4882a593Smuzhiyun
3282*4882a593Smuzhiyun /**
3283*4882a593Smuzhiyun * t4_get_version_info - extract various chip/firmware version information
3284*4882a593Smuzhiyun * @adapter: the adapter
3285*4882a593Smuzhiyun *
3286*4882a593Smuzhiyun * Reads various chip/firmware version numbers and stores them into the
3287*4882a593Smuzhiyun * adapter Adapter Parameters structure. If any of the efforts fails
3288*4882a593Smuzhiyun * the first failure will be returned, but all of the version numbers
3289*4882a593Smuzhiyun * will be read.
3290*4882a593Smuzhiyun */
t4_get_version_info(struct adapter * adapter)3291*4882a593Smuzhiyun int t4_get_version_info(struct adapter *adapter)
3292*4882a593Smuzhiyun {
3293*4882a593Smuzhiyun int ret = 0;
3294*4882a593Smuzhiyun
3295*4882a593Smuzhiyun #define FIRST_RET(__getvinfo) \
3296*4882a593Smuzhiyun do { \
3297*4882a593Smuzhiyun int __ret = __getvinfo; \
3298*4882a593Smuzhiyun if (__ret && !ret) \
3299*4882a593Smuzhiyun ret = __ret; \
3300*4882a593Smuzhiyun } while (0)
3301*4882a593Smuzhiyun
3302*4882a593Smuzhiyun FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3303*4882a593Smuzhiyun FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3304*4882a593Smuzhiyun FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3305*4882a593Smuzhiyun FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3306*4882a593Smuzhiyun FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3307*4882a593Smuzhiyun FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3308*4882a593Smuzhiyun
3309*4882a593Smuzhiyun #undef FIRST_RET
3310*4882a593Smuzhiyun return ret;
3311*4882a593Smuzhiyun }
3312*4882a593Smuzhiyun
/**
 * t4_dump_version_info - dump all of the adapter configuration IDs
 * @adapter: the adapter
 *
 * Dumps all of the various bits of adapter configuration version/revision
 * IDs information. This is typically called at some point after
 * t4_get_version_info() has been called.
 */
void t4_dump_version_info(struct adapter *adapter)
{
	/* Device information */
	dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
		 adapter->params.vpd.id,
		 CHELSIO_CHIP_RELEASE(adapter->params.chip));
	dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
		 adapter->params.vpd.sn, adapter->params.vpd.pn);

	/* Firmware Version (a zero version means none was read/loaded) */
	if (!adapter->params.fw_vers)
		dev_warn(adapter->pdev_dev, "No firmware loaded\n");
	else
		dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));

	/* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
	 * Firmware, so dev_info() is more appropriate here.)
	 */
	if (!adapter->params.bs_vers)
		dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
	else
		dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));

	/* TP Microcode Version */
	if (!adapter->params.tp_vers)
		dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
	else
		dev_info(adapter->pdev_dev,
			 "TP Microcode version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));

	/* Expansion ROM version */
	if (!adapter->params.er_vers)
		dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
	else
		dev_info(adapter->pdev_dev,
			 "Expansion ROM version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));

	/* Serial Configuration version (printed unconditionally; may be 0
	 * if the firmware couldn't report it)
	 */
	dev_info(adapter->pdev_dev, "Serial Configuration version: %#x\n",
		 adapter->params.scfg_vers);

	/* VPD Version (printed unconditionally; may be 0 as above) */
	dev_info(adapter->pdev_dev, "VPD version: %#x\n",
		 adapter->params.vpd_vers);
}
3382*4882a593Smuzhiyun
/**
 * t4_check_fw_version - check if the FW is supported with this driver
 * @adap: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver. Returns 0
 * if there's exact match, a negative error if the version could not be
 * read or there's a major version mismatch
 */
int t4_check_fw_version(struct adapter *adap)
{
	int i, ret, major, minor, micro;
	int exp_major, exp_minor, exp_micro;
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);

	ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	/* Try multiple times before returning error */
	for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
		ret = t4_get_fw_version(adap, &adap->params.fw_vers);

	if (ret)
		return ret;

	/* Break the version read from flash into its components. */
	major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
	minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
	micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);

	/* Pick the minimum supported firmware version for this chip. */
	switch (chip_version) {
	case CHELSIO_T4:
		exp_major = T4FW_MIN_VERSION_MAJOR;
		exp_minor = T4FW_MIN_VERSION_MINOR;
		exp_micro = T4FW_MIN_VERSION_MICRO;
		break;
	case CHELSIO_T5:
		exp_major = T5FW_MIN_VERSION_MAJOR;
		exp_minor = T5FW_MIN_VERSION_MINOR;
		exp_micro = T5FW_MIN_VERSION_MICRO;
		break;
	case CHELSIO_T6:
		exp_major = T6FW_MIN_VERSION_MAJOR;
		exp_minor = T6FW_MIN_VERSION_MINOR;
		exp_micro = T6FW_MIN_VERSION_MICRO;
		break;
	default:
		/* NOTE(review): this logs adap->chip while the chip type
		 * above is taken from adap->params.chip -- confirm both
		 * fields exist and agree.
		 */
		dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
			adap->chip);
		return -EINVAL;
	}

	/* Lexicographic (major, minor, micro) comparison against the
	 * minimum supported version.
	 */
	if (major < exp_major || (major == exp_major && minor < exp_minor) ||
	    (major == exp_major && minor == exp_minor && micro < exp_micro)) {
		dev_err(adap->pdev_dev,
			"Card has firmware version %u.%u.%u, minimum "
			"supported firmware is %u.%u.%u.\n", major, minor,
			micro, exp_major, exp_minor, exp_micro);
		return -EFAULT;
	}
	return 0;
}
3441*4882a593Smuzhiyun
3442*4882a593Smuzhiyun /* Is the given firmware API compatible with the one the driver was compiled
3443*4882a593Smuzhiyun * with?
3444*4882a593Smuzhiyun */
fw_compatible(const struct fw_hdr * hdr1,const struct fw_hdr * hdr2)3445*4882a593Smuzhiyun static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3446*4882a593Smuzhiyun {
3447*4882a593Smuzhiyun
3448*4882a593Smuzhiyun /* short circuit if it's the exact same firmware version */
3449*4882a593Smuzhiyun if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3450*4882a593Smuzhiyun return 1;
3451*4882a593Smuzhiyun
3452*4882a593Smuzhiyun #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3453*4882a593Smuzhiyun if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3454*4882a593Smuzhiyun SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
3455*4882a593Smuzhiyun return 1;
3456*4882a593Smuzhiyun #undef SAME_INTF
3457*4882a593Smuzhiyun
3458*4882a593Smuzhiyun return 0;
3459*4882a593Smuzhiyun }
3460*4882a593Smuzhiyun
3461*4882a593Smuzhiyun /* The firmware in the filesystem is usable, but should it be installed?
3462*4882a593Smuzhiyun * This routine explains itself in detail if it indicates the filesystem
3463*4882a593Smuzhiyun * firmware should be installed.
3464*4882a593Smuzhiyun */
should_install_fs_fw(struct adapter * adap,int card_fw_usable,int k,int c)3465*4882a593Smuzhiyun static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
3466*4882a593Smuzhiyun int k, int c)
3467*4882a593Smuzhiyun {
3468*4882a593Smuzhiyun const char *reason;
3469*4882a593Smuzhiyun
3470*4882a593Smuzhiyun if (!card_fw_usable) {
3471*4882a593Smuzhiyun reason = "incompatible or unusable";
3472*4882a593Smuzhiyun goto install;
3473*4882a593Smuzhiyun }
3474*4882a593Smuzhiyun
3475*4882a593Smuzhiyun if (k > c) {
3476*4882a593Smuzhiyun reason = "older than the version supported with this driver";
3477*4882a593Smuzhiyun goto install;
3478*4882a593Smuzhiyun }
3479*4882a593Smuzhiyun
3480*4882a593Smuzhiyun return 0;
3481*4882a593Smuzhiyun
3482*4882a593Smuzhiyun install:
3483*4882a593Smuzhiyun dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
3484*4882a593Smuzhiyun "installing firmware %u.%u.%u.%u on card.\n",
3485*4882a593Smuzhiyun FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3486*4882a593Smuzhiyun FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
3487*4882a593Smuzhiyun FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3488*4882a593Smuzhiyun FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
3489*4882a593Smuzhiyun
3490*4882a593Smuzhiyun return 1;
3491*4882a593Smuzhiyun }
3492*4882a593Smuzhiyun
/**
 *	t4_prep_fw - choose a usable firmware image and ensure it's on the card
 *	@adap: the adapter
 *	@fw_info: driver firmware info, including the compiled-in FW header
 *	@fw_data: firmware image loaded from the filesystem, or NULL if none
 *	@fw_size: size of @fw_data in bytes
 *	@card_fw: buffer that receives the header of the on-card firmware
 *	@state: device state as reported by the firmware
 *	@reset: in/out; cleared when a firmware upgrade has already reset
 *		the chip so the caller can skip its own reset
 *
 *	Compares the firmware in flash against the driver's expectations and
 *	the optional filesystem image, upgrading the card when appropriate.
 *	On success the adapter's cached FW/TP version numbers are refreshed
 *	from the (possibly newly installed) on-card header.
 */
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = t4_read_flash(adap, FLASH_FW_START,
			    sizeof(*card_fw) / sizeof(uint32_t),
			    (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		/* Treat an unreadable header as unusable firmware; we may
		 * still be able to install the filesystem image below.
		 */
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		/* Only flash the filesystem image while the device is still
		 * uninitialized; should_install_fs_fw() logs the reason.
		 */
		ret = t4_fw_upgrade(adap, adap->mbox, fw_data,
				    fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		*card_fw = *fs_fw;
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		/* Nothing usable anywhere: dump all three version numbers
		 * (driver / card / filesystem) to aid diagnosis.
		 */
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
			FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
			FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
			FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
			FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
			FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
		ret = -EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}
3577*4882a593Smuzhiyun
3578*4882a593Smuzhiyun /**
3579*4882a593Smuzhiyun * t4_flash_erase_sectors - erase a range of flash sectors
3580*4882a593Smuzhiyun * @adapter: the adapter
3581*4882a593Smuzhiyun * @start: the first sector to erase
3582*4882a593Smuzhiyun * @end: the last sector to erase
3583*4882a593Smuzhiyun *
3584*4882a593Smuzhiyun * Erases the sectors in the given inclusive range.
3585*4882a593Smuzhiyun */
t4_flash_erase_sectors(struct adapter * adapter,int start,int end)3586*4882a593Smuzhiyun static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3587*4882a593Smuzhiyun {
3588*4882a593Smuzhiyun int ret = 0;
3589*4882a593Smuzhiyun
3590*4882a593Smuzhiyun if (end >= adapter->params.sf_nsec)
3591*4882a593Smuzhiyun return -EINVAL;
3592*4882a593Smuzhiyun
3593*4882a593Smuzhiyun while (start <= end) {
3594*4882a593Smuzhiyun if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3595*4882a593Smuzhiyun (ret = sf1_write(adapter, 4, 0, 1,
3596*4882a593Smuzhiyun SF_ERASE_SECTOR | (start << 8))) != 0 ||
3597*4882a593Smuzhiyun (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3598*4882a593Smuzhiyun dev_err(adapter->pdev_dev,
3599*4882a593Smuzhiyun "erase of flash sector %d failed, error %d\n",
3600*4882a593Smuzhiyun start, ret);
3601*4882a593Smuzhiyun break;
3602*4882a593Smuzhiyun }
3603*4882a593Smuzhiyun start++;
3604*4882a593Smuzhiyun }
3605*4882a593Smuzhiyun t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3606*4882a593Smuzhiyun return ret;
3607*4882a593Smuzhiyun }
3608*4882a593Smuzhiyun
3609*4882a593Smuzhiyun /**
3610*4882a593Smuzhiyun * t4_flash_cfg_addr - return the address of the flash configuration file
3611*4882a593Smuzhiyun * @adapter: the adapter
3612*4882a593Smuzhiyun *
3613*4882a593Smuzhiyun * Return the address within the flash where the Firmware Configuration
3614*4882a593Smuzhiyun * File is stored.
3615*4882a593Smuzhiyun */
t4_flash_cfg_addr(struct adapter * adapter)3616*4882a593Smuzhiyun unsigned int t4_flash_cfg_addr(struct adapter *adapter)
3617*4882a593Smuzhiyun {
3618*4882a593Smuzhiyun if (adapter->params.sf_size == 0x100000)
3619*4882a593Smuzhiyun return FLASH_FPGA_CFG_START;
3620*4882a593Smuzhiyun else
3621*4882a593Smuzhiyun return FLASH_CFG_START;
3622*4882a593Smuzhiyun }
3623*4882a593Smuzhiyun
3624*4882a593Smuzhiyun /* Return TRUE if the specified firmware matches the adapter. I.e. T4
3625*4882a593Smuzhiyun * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3626*4882a593Smuzhiyun * and emit an error message for mismatched firmware to save our caller the
3627*4882a593Smuzhiyun * effort ...
3628*4882a593Smuzhiyun */
t4_fw_matches_chip(const struct adapter * adap,const struct fw_hdr * hdr)3629*4882a593Smuzhiyun static bool t4_fw_matches_chip(const struct adapter *adap,
3630*4882a593Smuzhiyun const struct fw_hdr *hdr)
3631*4882a593Smuzhiyun {
3632*4882a593Smuzhiyun /* The expression below will return FALSE for any unsupported adapter
3633*4882a593Smuzhiyun * which will keep us "honest" in the future ...
3634*4882a593Smuzhiyun */
3635*4882a593Smuzhiyun if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
3636*4882a593Smuzhiyun (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
3637*4882a593Smuzhiyun (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
3638*4882a593Smuzhiyun return true;
3639*4882a593Smuzhiyun
3640*4882a593Smuzhiyun dev_err(adap->pdev_dev,
3641*4882a593Smuzhiyun "FW image (%d) is not suitable for this adapter (%d)\n",
3642*4882a593Smuzhiyun hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
3643*4882a593Smuzhiyun return false;
3644*4882a593Smuzhiyun }
3645*4882a593Smuzhiyun
3646*4882a593Smuzhiyun /**
3647*4882a593Smuzhiyun * t4_load_fw - download firmware
3648*4882a593Smuzhiyun * @adap: the adapter
3649*4882a593Smuzhiyun * @fw_data: the firmware image to write
3650*4882a593Smuzhiyun * @size: image size
3651*4882a593Smuzhiyun *
3652*4882a593Smuzhiyun * Write the supplied firmware image to the card's serial flash.
3653*4882a593Smuzhiyun */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec = FLASH_FW_START_SEC;
	unsigned int fw_size = FLASH_FW_MAX_SIZE;
	unsigned int fw_start = FLASH_FW_START;

	/* Validate the image before touching the flash: non-empty,
	 * 512-byte granular, self-consistent with its header's length
	 * field, within the FW flash region, and built for this chip.
	 */
	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* The image's 32-bit words must sum to the checksum sentinel. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails. Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
	if (ret)
		goto out;

	/* Write the remaining pages one SF page at a time. */
	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
		if (ret)
			goto out;
	}

	/* Finally patch the real version number back into the header --
	 * this is the commit point for the whole download.
	 */
	ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
			     true);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	else
		/* Refresh the cached firmware version from the card. */
		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	return ret;
}
3734*4882a593Smuzhiyun
3735*4882a593Smuzhiyun /**
3736*4882a593Smuzhiyun * t4_phy_fw_ver - return current PHY firmware version
3737*4882a593Smuzhiyun * @adap: the adapter
3738*4882a593Smuzhiyun * @phy_fw_ver: return value buffer for PHY firmware version
3739*4882a593Smuzhiyun *
3740*4882a593Smuzhiyun * Returns the current version of external PHY firmware on the
3741*4882a593Smuzhiyun * adapter.
3742*4882a593Smuzhiyun */
t4_phy_fw_ver(struct adapter * adap,int * phy_fw_ver)3743*4882a593Smuzhiyun int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
3744*4882a593Smuzhiyun {
3745*4882a593Smuzhiyun u32 param, val;
3746*4882a593Smuzhiyun int ret;
3747*4882a593Smuzhiyun
3748*4882a593Smuzhiyun param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3749*4882a593Smuzhiyun FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3750*4882a593Smuzhiyun FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3751*4882a593Smuzhiyun FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
3752*4882a593Smuzhiyun ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
3753*4882a593Smuzhiyun ¶m, &val);
3754*4882a593Smuzhiyun if (ret)
3755*4882a593Smuzhiyun return ret;
3756*4882a593Smuzhiyun *phy_fw_ver = val;
3757*4882a593Smuzhiyun return 0;
3758*4882a593Smuzhiyun }
3759*4882a593Smuzhiyun
3760*4882a593Smuzhiyun /**
3761*4882a593Smuzhiyun * t4_load_phy_fw - download port PHY firmware
3762*4882a593Smuzhiyun * @adap: the adapter
3763*4882a593Smuzhiyun * @win: the PCI-E Memory Window index to use for t4_memory_rw()
3764*4882a593Smuzhiyun * @phy_fw_version: function to check PHY firmware versions
3765*4882a593Smuzhiyun * @phy_fw_data: the PHY firmware image to write
3766*4882a593Smuzhiyun * @phy_fw_size: image size
3767*4882a593Smuzhiyun *
3768*4882a593Smuzhiyun * Transfer the specified PHY firmware to the adapter. If a non-NULL
3769*4882a593Smuzhiyun * @phy_fw_version is supplied, then it will be used to determine if
3770*4882a593Smuzhiyun * it's necessary to perform the transfer by comparing the version
3771*4882a593Smuzhiyun * of any existing adapter PHY firmware with that of the passed in
3772*4882a593Smuzhiyun * PHY firmware image.
3773*4882a593Smuzhiyun *
3774*4882a593Smuzhiyun * A negative error number will be returned if an error occurs. If
3775*4882a593Smuzhiyun * version number support is available and there's no need to upgrade
3776*4882a593Smuzhiyun * the firmware, 0 will be returned. If firmware is successfully
3777*4882a593Smuzhiyun * transferred to the adapter, 1 will be returned.
3778*4882a593Smuzhiyun *
3779*4882a593Smuzhiyun * NOTE: some adapters only have local RAM to store the PHY firmware. As
3780*4882a593Smuzhiyun * a result, a RESET of the adapter would cause that RAM to lose its
3781*4882a593Smuzhiyun * contents. Thus, loading PHY firmware on such adapters must happen
3782*4882a593Smuzhiyun * after any FW_RESET_CMDs ...
3783*4882a593Smuzhiyun */
int t4_load_phy_fw(struct adapter *adap, int win,
		   int (*phy_fw_version)(const u8 *, size_t),
		   const u8 *phy_fw_data, size_t phy_fw_size)
{
	int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
	unsigned long mtype = 0, maddr = 0;
	u32 param, val;
	int ret;

	/* If we have version number support, then check to see if the adapter
	 * already has up-to-date PHY firmware loaded.
	 */
	if (phy_fw_version) {
		new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
		if (ret < 0)
			return ret;

		if (cur_phy_fw_ver >= new_phy_fw_vers) {
			CH_WARN(adap, "PHY Firmware already up-to-date, "
				"version %#x\n", cur_phy_fw_ver);
			return 0;
		}
	}

	/* Ask the firmware where it wants us to copy the PHY firmware image.
	 * The size of the file requires a special version of the READ command
	 * which will pass the file size via the values field in PARAMS_CMD and
	 * retrieve the return value from firmware and place it in the same
	 * buffer values
	 *
	 * (The "&param, &val" argument lists below were mojibaked to
	 * "&para;m" by a bad text conversion; restored here.)
	 */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
	val = phy_fw_size;
	ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
				 &param, &val, 1, true);
	if (ret < 0)
		return ret;
	mtype = val >> 8;
	maddr = (val & 0xff) << 16;

	/* Copy the supplied PHY Firmware image to the adapter memory location
	 * allocated by the adapter firmware.
	 */
	spin_lock_bh(&adap->win0_lock);
	ret = t4_memory_rw(adap, win, mtype, maddr,
			   phy_fw_size, (__be32 *)phy_fw_data,
			   T4_MEMORY_WRITE);
	spin_unlock_bh(&adap->win0_lock);
	if (ret)
		return ret;

	/* Tell the firmware that the PHY firmware image has been written to
	 * RAM and it can now start copying it over to the PHYs. The chip
	 * firmware will RESET the affected PHYs as part of this operation
	 * leaving them running the new PHY firmware image.
	 */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
		 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
		 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
	ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
				    &param, &val, 30000);

	/* If we have version number support, then check to see that the new
	 * firmware got loaded properly.
	 */
	if (phy_fw_version) {
		ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
		if (ret < 0)
			return ret;

		if (cur_phy_fw_ver != new_phy_fw_vers) {
			CH_WARN(adap, "PHY Firmware did not update: "
				"version on adapter %#x, "
				"version flashed %#x\n",
				cur_phy_fw_ver, new_phy_fw_vers);
			return -ENXIO;
		}
	}

	return 1;
}
3869*4882a593Smuzhiyun
3870*4882a593Smuzhiyun /**
3871*4882a593Smuzhiyun * t4_fwcache - firmware cache operation
3872*4882a593Smuzhiyun * @adap: the adapter
3873*4882a593Smuzhiyun * @op : the operation (flush or flush and invalidate)
3874*4882a593Smuzhiyun */
t4_fwcache(struct adapter * adap,enum fw_params_param_dev_fwcache op)3875*4882a593Smuzhiyun int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3876*4882a593Smuzhiyun {
3877*4882a593Smuzhiyun struct fw_params_cmd c;
3878*4882a593Smuzhiyun
3879*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
3880*4882a593Smuzhiyun c.op_to_vfn =
3881*4882a593Smuzhiyun cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
3882*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3883*4882a593Smuzhiyun FW_PARAMS_CMD_PFN_V(adap->pf) |
3884*4882a593Smuzhiyun FW_PARAMS_CMD_VFN_V(0));
3885*4882a593Smuzhiyun c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3886*4882a593Smuzhiyun c.param[0].mnem =
3887*4882a593Smuzhiyun cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3888*4882a593Smuzhiyun FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
3889*4882a593Smuzhiyun c.param[0].val = cpu_to_be32(op);
3890*4882a593Smuzhiyun
3891*4882a593Smuzhiyun return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3892*4882a593Smuzhiyun }
3893*4882a593Smuzhiyun
/**
 *	t4_cim_read_pif_la - read the CIM PIF logic analyzer
 *	@adap: the adapter
 *	@pif_req: destination for CIM_PIFLA_SIZE * 6 request-side LA words
 *	@pif_rsp: destination for CIM_PIFLA_SIZE * 6 response-side LA words
 *	@pif_req_wrptr: optional; receives the request LA write pointer
 *	@pif_rsp_wrptr: optional; receives the response LA write pointer
 *
 *	Dumps the PO (request) and PI (response) logic-analyzer buffers by
 *	stepping the debug read pointers through the capture RAM.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	/* Temporarily stop LA capture while we read it out; the original
	 * configuration is restored at the end.
	 */
	cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
	if (cfg & LADBGEN_F)
		t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);

	val = t4_read_reg(adap, CIM_DEBUGSTS_A);
	req = POLADBGWRPTR_G(val);
	rsp = PILADBGWRPTR_G(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		/* Read 6 words per entry, then skip 2 pointer positions
		 * (masked to the pointer width) to reach the next entry.
		 */
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
				     PILADBGRDPTR_V(rsp));
			*pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
			*pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
			req++;
			rsp++;
		}
		req = (req + 2) & POLADBGRDPTR_M;
		rsp = (rsp + 2) & PILADBGRDPTR_M;
	}
	t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
}
3927*4882a593Smuzhiyun
/**
 *	t4_cim_read_ma_la - read the CIM MA logic analyzer
 *	@adap: the adapter
 *	@ma_req: destination for CIM_MALA_SIZE * 5 request-side LA words
 *	@ma_rsp: destination for CIM_MALA_SIZE * 5 response-side LA words
 *
 *	Dumps the MA request/response logic-analyzer capture buffers.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	/* Temporarily stop LA capture while reading; restore config after. */
	cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
	if (cfg & LADBGEN_F)
		t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		/* Entries are 8 positions apart; only 5 words are read
		 * from each entry.
		 */
		for (j = 0; j < 5; j++) {
			idx = 8 * i + j;
			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
				     PILADBGRDPTR_V(idx));
			*ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
			*ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
		}
	}
	t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
}
3948*4882a593Smuzhiyun
/**
 *	t4_ulprx_read_la - read the ULP RX logic analyzer
 *	@adap: the adapter
 *	@la_buf: destination buffer, 8 * ULPRX_LA_SIZE words, interleaved
 *		 so that channel i's j-th word lands at la_buf[j * 8 + i]
 *
 *	Dumps all 8 ULP RX LA capture channels into @la_buf.
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		/* Select channel i, rewind its read pointer to the current
		 * write pointer, then stream out the capture RAM.
		 */
		t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
		j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
		t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
	}
}
3963*4882a593Smuzhiyun
3964*4882a593Smuzhiyun /* The ADVERT_MASK is used to mask out all of the Advertised Firmware Port
3965*4882a593Smuzhiyun * Capabilities which we control with separate controls -- see, for instance,
3966*4882a593Smuzhiyun * Pause Frames and Forward Error Correction. In order to determine what the
3967*4882a593Smuzhiyun * full set of Advertised Port Capabilities are, the base Advertised Port
3968*4882a593Smuzhiyun * Capabilities (masked by ADVERT_MASK) must be combined with the Advertised
3969*4882a593Smuzhiyun * Port Capabilities associated with those other controls. See
3970*4882a593Smuzhiyun * t4_link_acaps() for how this is done.
3971*4882a593Smuzhiyun */
3972*4882a593Smuzhiyun #define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
3973*4882a593Smuzhiyun FW_PORT_CAP32_ANEG)
3974*4882a593Smuzhiyun
3975*4882a593Smuzhiyun /**
3976*4882a593Smuzhiyun * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
3977*4882a593Smuzhiyun * @caps16: a 16-bit Port Capabilities value
3978*4882a593Smuzhiyun *
3979*4882a593Smuzhiyun * Returns the equivalent 32-bit Port Capabilities value.
3980*4882a593Smuzhiyun */
fwcaps16_to_caps32(fw_port_cap16_t caps16)3981*4882a593Smuzhiyun static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
3982*4882a593Smuzhiyun {
3983*4882a593Smuzhiyun fw_port_cap32_t caps32 = 0;
3984*4882a593Smuzhiyun
3985*4882a593Smuzhiyun #define CAP16_TO_CAP32(__cap) \
3986*4882a593Smuzhiyun do { \
3987*4882a593Smuzhiyun if (caps16 & FW_PORT_CAP_##__cap) \
3988*4882a593Smuzhiyun caps32 |= FW_PORT_CAP32_##__cap; \
3989*4882a593Smuzhiyun } while (0)
3990*4882a593Smuzhiyun
3991*4882a593Smuzhiyun CAP16_TO_CAP32(SPEED_100M);
3992*4882a593Smuzhiyun CAP16_TO_CAP32(SPEED_1G);
3993*4882a593Smuzhiyun CAP16_TO_CAP32(SPEED_25G);
3994*4882a593Smuzhiyun CAP16_TO_CAP32(SPEED_10G);
3995*4882a593Smuzhiyun CAP16_TO_CAP32(SPEED_40G);
3996*4882a593Smuzhiyun CAP16_TO_CAP32(SPEED_100G);
3997*4882a593Smuzhiyun CAP16_TO_CAP32(FC_RX);
3998*4882a593Smuzhiyun CAP16_TO_CAP32(FC_TX);
3999*4882a593Smuzhiyun CAP16_TO_CAP32(ANEG);
4000*4882a593Smuzhiyun CAP16_TO_CAP32(FORCE_PAUSE);
4001*4882a593Smuzhiyun CAP16_TO_CAP32(MDIAUTO);
4002*4882a593Smuzhiyun CAP16_TO_CAP32(MDISTRAIGHT);
4003*4882a593Smuzhiyun CAP16_TO_CAP32(FEC_RS);
4004*4882a593Smuzhiyun CAP16_TO_CAP32(FEC_BASER_RS);
4005*4882a593Smuzhiyun CAP16_TO_CAP32(802_3_PAUSE);
4006*4882a593Smuzhiyun CAP16_TO_CAP32(802_3_ASM_DIR);
4007*4882a593Smuzhiyun
4008*4882a593Smuzhiyun #undef CAP16_TO_CAP32
4009*4882a593Smuzhiyun
4010*4882a593Smuzhiyun return caps32;
4011*4882a593Smuzhiyun }
4012*4882a593Smuzhiyun
4013*4882a593Smuzhiyun /**
4014*4882a593Smuzhiyun * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
4015*4882a593Smuzhiyun * @caps32: a 32-bit Port Capabilities value
4016*4882a593Smuzhiyun *
4017*4882a593Smuzhiyun * Returns the equivalent 16-bit Port Capabilities value. Note that
4018*4882a593Smuzhiyun * not all 32-bit Port Capabilities can be represented in the 16-bit
4019*4882a593Smuzhiyun * Port Capabilities and some fields/values may not make it.
4020*4882a593Smuzhiyun */
fwcaps32_to_caps16(fw_port_cap32_t caps32)4021*4882a593Smuzhiyun static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
4022*4882a593Smuzhiyun {
4023*4882a593Smuzhiyun fw_port_cap16_t caps16 = 0;
4024*4882a593Smuzhiyun
4025*4882a593Smuzhiyun #define CAP32_TO_CAP16(__cap) \
4026*4882a593Smuzhiyun do { \
4027*4882a593Smuzhiyun if (caps32 & FW_PORT_CAP32_##__cap) \
4028*4882a593Smuzhiyun caps16 |= FW_PORT_CAP_##__cap; \
4029*4882a593Smuzhiyun } while (0)
4030*4882a593Smuzhiyun
4031*4882a593Smuzhiyun CAP32_TO_CAP16(SPEED_100M);
4032*4882a593Smuzhiyun CAP32_TO_CAP16(SPEED_1G);
4033*4882a593Smuzhiyun CAP32_TO_CAP16(SPEED_10G);
4034*4882a593Smuzhiyun CAP32_TO_CAP16(SPEED_25G);
4035*4882a593Smuzhiyun CAP32_TO_CAP16(SPEED_40G);
4036*4882a593Smuzhiyun CAP32_TO_CAP16(SPEED_100G);
4037*4882a593Smuzhiyun CAP32_TO_CAP16(FC_RX);
4038*4882a593Smuzhiyun CAP32_TO_CAP16(FC_TX);
4039*4882a593Smuzhiyun CAP32_TO_CAP16(802_3_PAUSE);
4040*4882a593Smuzhiyun CAP32_TO_CAP16(802_3_ASM_DIR);
4041*4882a593Smuzhiyun CAP32_TO_CAP16(ANEG);
4042*4882a593Smuzhiyun CAP32_TO_CAP16(FORCE_PAUSE);
4043*4882a593Smuzhiyun CAP32_TO_CAP16(MDIAUTO);
4044*4882a593Smuzhiyun CAP32_TO_CAP16(MDISTRAIGHT);
4045*4882a593Smuzhiyun CAP32_TO_CAP16(FEC_RS);
4046*4882a593Smuzhiyun CAP32_TO_CAP16(FEC_BASER_RS);
4047*4882a593Smuzhiyun
4048*4882a593Smuzhiyun #undef CAP32_TO_CAP16
4049*4882a593Smuzhiyun
4050*4882a593Smuzhiyun return caps16;
4051*4882a593Smuzhiyun }
4052*4882a593Smuzhiyun
4053*4882a593Smuzhiyun /* Translate Firmware Port Capabilities Pause specification to Common Code */
fwcap_to_cc_pause(fw_port_cap32_t fw_pause)4054*4882a593Smuzhiyun static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
4055*4882a593Smuzhiyun {
4056*4882a593Smuzhiyun enum cc_pause cc_pause = 0;
4057*4882a593Smuzhiyun
4058*4882a593Smuzhiyun if (fw_pause & FW_PORT_CAP32_FC_RX)
4059*4882a593Smuzhiyun cc_pause |= PAUSE_RX;
4060*4882a593Smuzhiyun if (fw_pause & FW_PORT_CAP32_FC_TX)
4061*4882a593Smuzhiyun cc_pause |= PAUSE_TX;
4062*4882a593Smuzhiyun
4063*4882a593Smuzhiyun return cc_pause;
4064*4882a593Smuzhiyun }
4065*4882a593Smuzhiyun
4066*4882a593Smuzhiyun /* Translate Common Code Pause specification into Firmware Port Capabilities */
cc_to_fwcap_pause(enum cc_pause cc_pause)4067*4882a593Smuzhiyun static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
4068*4882a593Smuzhiyun {
4069*4882a593Smuzhiyun /* Translate orthogonal RX/TX Pause Controls for L1 Configure
4070*4882a593Smuzhiyun * commands, etc.
4071*4882a593Smuzhiyun */
4072*4882a593Smuzhiyun fw_port_cap32_t fw_pause = 0;
4073*4882a593Smuzhiyun
4074*4882a593Smuzhiyun if (cc_pause & PAUSE_RX)
4075*4882a593Smuzhiyun fw_pause |= FW_PORT_CAP32_FC_RX;
4076*4882a593Smuzhiyun if (cc_pause & PAUSE_TX)
4077*4882a593Smuzhiyun fw_pause |= FW_PORT_CAP32_FC_TX;
4078*4882a593Smuzhiyun if (!(cc_pause & PAUSE_AUTONEG))
4079*4882a593Smuzhiyun fw_pause |= FW_PORT_CAP32_FORCE_PAUSE;
4080*4882a593Smuzhiyun
4081*4882a593Smuzhiyun /* Translate orthogonal Pause controls into IEEE 802.3 Pause,
4082*4882a593Smuzhiyun * Asymmetrical Pause for use in reporting to upper layer OS code, etc.
4083*4882a593Smuzhiyun * Note that these bits are ignored in L1 Configure commands.
4084*4882a593Smuzhiyun */
4085*4882a593Smuzhiyun if (cc_pause & PAUSE_RX) {
4086*4882a593Smuzhiyun if (cc_pause & PAUSE_TX)
4087*4882a593Smuzhiyun fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
4088*4882a593Smuzhiyun else
4089*4882a593Smuzhiyun fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR |
4090*4882a593Smuzhiyun FW_PORT_CAP32_802_3_PAUSE;
4091*4882a593Smuzhiyun } else if (cc_pause & PAUSE_TX) {
4092*4882a593Smuzhiyun fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
4093*4882a593Smuzhiyun }
4094*4882a593Smuzhiyun
4095*4882a593Smuzhiyun return fw_pause;
4096*4882a593Smuzhiyun }
4097*4882a593Smuzhiyun
4098*4882a593Smuzhiyun /* Translate Firmware Forward Error Correction specification to Common Code */
fwcap_to_cc_fec(fw_port_cap32_t fw_fec)4099*4882a593Smuzhiyun static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
4100*4882a593Smuzhiyun {
4101*4882a593Smuzhiyun enum cc_fec cc_fec = 0;
4102*4882a593Smuzhiyun
4103*4882a593Smuzhiyun if (fw_fec & FW_PORT_CAP32_FEC_RS)
4104*4882a593Smuzhiyun cc_fec |= FEC_RS;
4105*4882a593Smuzhiyun if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
4106*4882a593Smuzhiyun cc_fec |= FEC_BASER_RS;
4107*4882a593Smuzhiyun
4108*4882a593Smuzhiyun return cc_fec;
4109*4882a593Smuzhiyun }
4110*4882a593Smuzhiyun
4111*4882a593Smuzhiyun /* Translate Common Code Forward Error Correction specification to Firmware */
cc_to_fwcap_fec(enum cc_fec cc_fec)4112*4882a593Smuzhiyun static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
4113*4882a593Smuzhiyun {
4114*4882a593Smuzhiyun fw_port_cap32_t fw_fec = 0;
4115*4882a593Smuzhiyun
4116*4882a593Smuzhiyun if (cc_fec & FEC_RS)
4117*4882a593Smuzhiyun fw_fec |= FW_PORT_CAP32_FEC_RS;
4118*4882a593Smuzhiyun if (cc_fec & FEC_BASER_RS)
4119*4882a593Smuzhiyun fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
4120*4882a593Smuzhiyun
4121*4882a593Smuzhiyun return fw_fec;
4122*4882a593Smuzhiyun }
4123*4882a593Smuzhiyun
/**
 *	t4_link_acaps - compute Link Advertised Port Capabilities
 *	@adapter: the adapter
 *	@port: the Port ID
 *	@lc: the Port's Link Configuration
 *
 *	Synthesize the Advertised Port Capabilities we'll be using based on
 *	the base Advertised Port Capabilities (which have been filtered by
 *	ADVERT_MASK) plus the individual controls for things like Pause
 *	Frames, Forward Error Correction, MDI, etc.
 *
 *	As a side effect, when auto-negotiation is off or unsupported, the
 *	resolved Flow Control and FEC settings are written back into
 *	@lc->fc and @lc->fec.
 *
 *	NOTE(review): on failure this returns -EINVAL through the unsigned
 *	fw_port_cap32_t return type; callers must treat that value
 *	accordingly — confirm against each call site.
 */
fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
			      struct link_config *lc)
{
	fw_port_cap32_t fw_fc, fw_fec, acaps;
	unsigned int fw_mdi;
	char cc_fec;	/* holds an enum cc_fec value; small enough for char */

	/* Advertise MDI-Auto only if the Physical Port Capabilities
	 * include it.
	 */
	fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);

	/* Convert driver coding of Pause Frame Flow Control settings into the
	 * Firmware's API.
	 */
	fw_fc = cc_to_fwcap_pause(lc->requested_fc);

	/* Convert Common Code Forward Error Control settings into the
	 * Firmware's API.  If the current Requested FEC has "Automatic"
	 * (IEEE 802.3) specified, then we use whatever the Firmware
	 * sent us as part of its IEEE 802.3-based interpretation of
	 * the Transceiver Module EPROM FEC parameters.  Otherwise we
	 * use whatever is in the current Requested FEC settings.
	 */
	if (lc->requested_fec & FEC_AUTO)
		cc_fec = fwcap_to_cc_fec(lc->def_acaps);
	else
		cc_fec = lc->requested_fec;
	fw_fec = cc_to_fwcap_fec(cc_fec);

	/* Figure out what our Requested Port Capabilities are going to be.
	 * Note parallel structure in t4_handle_get_port_info() and
	 * init_link_config().
	 */
	if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
		/* No auto-negotiation support: advertise exactly what was
		 * requested and record the resolved FC/FEC settings.
		 */
		acaps = lc->acaps | fw_fc | fw_fec;
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		/* Auto-negotiation supported but disabled: pin the speed. */
		acaps = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = cc_fec;
	} else {
		/* Auto-negotiation enabled: advertise the full set and let
		 * negotiation resolve FC/FEC later.
		 */
		acaps = lc->acaps | fw_fc | fw_fec | fw_mdi;
	}

	/* Some Requested Port Capabilities are trivially wrong if they exceed
	 * the Physical Port Capabilities.  We can check that here and provide
	 * moderately useful feedback in the system log.
	 *
	 * Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
	 * we need to exclude this from this check in order to maintain
	 * compatibility ...
	 */
	if ((acaps & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
		dev_err(adapter->pdev_dev, "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
			acaps, lc->pcaps);
		return -EINVAL;
	}

	return acaps;
}
4194*4882a593Smuzhiyun
/**
 *	t4_link_l1cfg_core - apply link configuration to MAC/PHY
 *	@adapter: the adapter
 *	@mbox: the Firmware Mailbox to use
 *	@port: the Port ID
 *	@lc: the Port's Link Configuration
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *	@timeout: time to wait for command to finish before timing out
 *		(negative implies @sleep_ok=false)
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 *
 *	Returns 0 on success, -EINVAL for an inconsistent request, or a
 *	negative error code if the firmware rejects the L1 Configure command.
 */
int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
		       unsigned int port, struct link_config *lc,
		       u8 sleep_ok, int timeout)
{
	unsigned int fw_caps = adapter->params.fw_caps_support;
	struct fw_port_cmd cmd;
	fw_port_cap32_t rcap;
	int ret;

	/* Requesting auto-negotiation on a port that can't do it is an
	 * immediate error.
	 */
	if (!(lc->pcaps & FW_PORT_CAP32_ANEG) &&
	    lc->autoneg == AUTONEG_ENABLE) {
		return -EINVAL;
	}

	/* Compute our Requested Port Capabilities and send that on to the
	 * Firmware.
	 */
	rcap = t4_link_acaps(adapter, port, lc);
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
				       FW_PORT_CMD_PORTID_V(port));
	/* Older firmware only understands the 16-bit L1_CFG action and the
	 * matching 16-bit capability encoding; newer firmware uses the
	 * 32-bit variants.
	 */
	cmd.action_to_len16 =
		cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
						 ? FW_PORT_ACTION_L1_CFG
						 : FW_PORT_ACTION_L1_CFG32) |
			    FW_LEN16(cmd));
	if (fw_caps == FW_CAPS16)
		cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
	else
		cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);

	ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
				      sleep_ok, timeout);

	/* Unfortunately, even if the Requested Port Capabilities "fit" within
	 * the Physical Port Capabilities, some combinations of features may
	 * still not be legal.  For example, 40Gb/s and Reed-Solomon Forward
	 * Error Correction.  So if the Firmware rejects the L1 Configure
	 * request, flag that here.
	 */
	if (ret) {
		dev_err(adapter->pdev_dev,
			"Requested Port Capabilities %#x rejected, error %d\n",
			rcap, -ret);
		return ret;
	}
	return 0;
}
4261*4882a593Smuzhiyun
4262*4882a593Smuzhiyun /**
4263*4882a593Smuzhiyun * t4_restart_aneg - restart autonegotiation
4264*4882a593Smuzhiyun * @adap: the adapter
4265*4882a593Smuzhiyun * @mbox: mbox to use for the FW command
4266*4882a593Smuzhiyun * @port: the port id
4267*4882a593Smuzhiyun *
4268*4882a593Smuzhiyun * Restarts autonegotiation for the selected port.
4269*4882a593Smuzhiyun */
t4_restart_aneg(struct adapter * adap,unsigned int mbox,unsigned int port)4270*4882a593Smuzhiyun int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
4271*4882a593Smuzhiyun {
4272*4882a593Smuzhiyun unsigned int fw_caps = adap->params.fw_caps_support;
4273*4882a593Smuzhiyun struct fw_port_cmd c;
4274*4882a593Smuzhiyun
4275*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
4276*4882a593Smuzhiyun c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4277*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4278*4882a593Smuzhiyun FW_PORT_CMD_PORTID_V(port));
4279*4882a593Smuzhiyun c.action_to_len16 =
4280*4882a593Smuzhiyun cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
4281*4882a593Smuzhiyun ? FW_PORT_ACTION_L1_CFG
4282*4882a593Smuzhiyun : FW_PORT_ACTION_L1_CFG32) |
4283*4882a593Smuzhiyun FW_LEN16(c));
4284*4882a593Smuzhiyun if (fw_caps == FW_CAPS16)
4285*4882a593Smuzhiyun c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
4286*4882a593Smuzhiyun else
4287*4882a593Smuzhiyun c.u.l1cfg32.rcap32 = cpu_to_be32(FW_PORT_CAP32_ANEG);
4288*4882a593Smuzhiyun return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4289*4882a593Smuzhiyun }
4290*4882a593Smuzhiyun
/* Platform-specific hook invoked when a matching interrupt condition fires. */
typedef void (*int_handler_t)(struct adapter *adap);

/* One entry of a table-driven interrupt-status decode table; tables are
 * terminated by an entry with mask == 0 (see t4_handle_intr_status()).
 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};
4300*4882a593Smuzhiyun
4301*4882a593Smuzhiyun /**
4302*4882a593Smuzhiyun * t4_handle_intr_status - table driven interrupt handler
4303*4882a593Smuzhiyun * @adapter: the adapter that generated the interrupt
4304*4882a593Smuzhiyun * @reg: the interrupt status register to process
4305*4882a593Smuzhiyun * @acts: table of interrupt actions
4306*4882a593Smuzhiyun *
4307*4882a593Smuzhiyun * A table driven interrupt handler that applies a set of masks to an
4308*4882a593Smuzhiyun * interrupt status word and performs the corresponding actions if the
4309*4882a593Smuzhiyun * interrupts described by the mask have occurred. The actions include
4310*4882a593Smuzhiyun * optionally emitting a warning or alert message. The table is terminated
4311*4882a593Smuzhiyun * by an entry specifying mask 0. Returns the number of fatal interrupt
4312*4882a593Smuzhiyun * conditions.
4313*4882a593Smuzhiyun */
t4_handle_intr_status(struct adapter * adapter,unsigned int reg,const struct intr_info * acts)4314*4882a593Smuzhiyun static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4315*4882a593Smuzhiyun const struct intr_info *acts)
4316*4882a593Smuzhiyun {
4317*4882a593Smuzhiyun int fatal = 0;
4318*4882a593Smuzhiyun unsigned int mask = 0;
4319*4882a593Smuzhiyun unsigned int status = t4_read_reg(adapter, reg);
4320*4882a593Smuzhiyun
4321*4882a593Smuzhiyun for ( ; acts->mask; ++acts) {
4322*4882a593Smuzhiyun if (!(status & acts->mask))
4323*4882a593Smuzhiyun continue;
4324*4882a593Smuzhiyun if (acts->fatal) {
4325*4882a593Smuzhiyun fatal++;
4326*4882a593Smuzhiyun dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4327*4882a593Smuzhiyun status & acts->mask);
4328*4882a593Smuzhiyun } else if (acts->msg && printk_ratelimit())
4329*4882a593Smuzhiyun dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4330*4882a593Smuzhiyun status & acts->mask);
4331*4882a593Smuzhiyun if (acts->int_handler)
4332*4882a593Smuzhiyun acts->int_handler(adapter);
4333*4882a593Smuzhiyun mask |= acts->mask;
4334*4882a593Smuzhiyun }
4335*4882a593Smuzhiyun status &= mask;
4336*4882a593Smuzhiyun if (status) /* clear processed interrupts */
4337*4882a593Smuzhiyun t4_write_reg(adapter, reg, status);
4338*4882a593Smuzhiyun return fatal;
4339*4882a593Smuzhiyun }
4340*4882a593Smuzhiyun
/*
 * Interrupt handler for the PCIE module.  Decodes the chip-appropriate
 * cause register(s) through table-driven dispatch and escalates any fatal
 * condition via t4_fatal_err().
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	/* T4-only: UTL System Bus Agent status decode table. */
	static const struct intr_info sysbus_intr_info[] = {
		{ RNPP_F, "RXNP array parity error", -1, 1 },
		{ RPCP_F, "RXPC array parity error", -1, 1 },
		{ RCIP_F, "RXCIF array parity error", -1, 1 },
		{ RCCP_F, "Rx completions control array parity error", -1, 1 },
		{ RFTP_F, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	/* T4-only: UTL PCI Express Port status decode table. */
	static const struct intr_info pcie_port_intr_info[] = {
		{ TPCP_F, "TXPC array parity error", -1, 1 },
		{ TNPP_F, "TXNP array parity error", -1, 1 },
		{ TFTP_F, "TXFT array parity error", -1, 1 },
		{ TCAP_F, "TXCA array parity error", -1, 1 },
		{ TCIP_F, "TXCIF array parity error", -1, 1 },
		{ RCAP_F, "RXCA array parity error", -1, 1 },
		{ OTDD_F, "outbound request TLP discarded", -1, 1 },
		{ RDPE_F, "Rx data parity error", -1, 1 },
		{ TDUE_F, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	/* T4 PCIE interrupt cause decode table. */
	static const struct intr_info pcie_intr_info[] = {
		{ MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
		{ MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
		{ MSIDATAPERR_F, "MSI data parity error", -1, 1 },
		{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
		{ PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
		{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
		{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
		{ DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
		{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
		{ HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR_F, "PCI FID parity error", -1, 1 },
		{ INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
		{ MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
		{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
		{ RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
		{ RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
		{ RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
		{ PCIESINT_F, "PCI core secondary fault", -1, 1 },
		{ PCIEPINT_F, "PCI core primary fault", -1, 1 },
		{ UNXSPLCPLERR_F, "PCI unexpected split completion error",
		  -1, 0 },
		{ 0 }
	};

	/* T5+ PCIE interrupt cause decode table. */
	static struct intr_info t5_pcie_intr_info[] = {
		{ MSTGRPPERR_F, "Master Response Read Queue parity error",
		  -1, 1 },
		{ MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
		{ MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
		{ MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
		{ CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
		{ DREQWRPERR_F, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
		{ HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR_F, "PCI FID parity error", -1, 1 },
		{ VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
		{ MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
		{ PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
		{ IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
		  -1, 1 },
		{ RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
		{ IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
		{ TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		{ READRSPERR_F, "Outbound read error", -1, 0 },
		{ 0 }
	};

	int fat;

	/* T4 decodes three separate cause registers; later chips fold
	 * everything into the single PCIE_INT_CAUSE register.
	 */
	if (is_t4(adapter->params.chip))
		fat = t4_handle_intr_status(adapter,
					    PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
					    sysbus_intr_info) +
		      t4_handle_intr_status(adapter,
					    PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
					    pcie_port_intr_info) +
		      t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
					    pcie_intr_info);
	else
		fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
					    t5_pcie_intr_info);

	if (fat)
		t4_fatal_err(adapter);
}
4459*4882a593Smuzhiyun
/*
 * TP interrupt handler.  Both decoded conditions are fatal; any hit
 * escalates to t4_fatal_err().
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		/* Low 30 bits of TP_INT_CAUSE are parity-error indications. */
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
		t4_fatal_err(adapter);
}
4474*4882a593Smuzhiyun
/*
 * SGE interrupt handler.  Accumulates all detected error causes into @v;
 * any non-zero accumulation is treated as fatal.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u32 v = 0, perr;
	u32 err;

	/* Decode table for SGE_INT_CAUSE3, common to all chip revisions. */
	static const struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE_F,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC_F,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
		{ DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
		{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO_F,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	/* Additional SGE_INT_CAUSE3 conditions that exist only on T4/T5. */
	static struct intr_info t4t5_sge_intr_info[] = {
		{ ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
		{ DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
		{ ERR_EGR_CTXT_PRIO_F,
		  "SGE too many priority egress contexts", -1, 0 },
		{ 0 }
	};

	/* SGE_INT_CAUSE1/2 hold raw parity-error bits; any set bit is
	 * reported and accumulated as fatal.
	 */
	perr = t4_read_reg(adapter, SGE_INT_CAUSE1_A);
	if (perr) {
		v |= perr;
		dev_alert(adapter->pdev_dev, "SGE Cause1 Parity Error %#x\n",
			  perr);
	}

	perr = t4_read_reg(adapter, SGE_INT_CAUSE2_A);
	if (perr) {
		v |= perr;
		dev_alert(adapter->pdev_dev, "SGE Cause2 Parity Error %#x\n",
			  perr);
	}

	if (CHELSIO_CHIP_VERSION(adapter->params.chip) >= CHELSIO_T5) {
		perr = t4_read_reg(adapter, SGE_INT_CAUSE5_A);
		/* Parity error (CRC) for err_T_RxCRC is trivial, ignore it */
		perr &= ~ERR_T_RXCRC_F;
		if (perr) {
			v |= perr;
			dev_alert(adapter->pdev_dev,
				  "SGE Cause5 Parity Error %#x\n", perr);
		}
	}

	/* SGE_INT_CAUSE3 is decoded against the common table, and on T4/T5
	 * also against the chip-specific table.
	 */
	v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
					   t4t5_sge_intr_info);

	/* Report and clear any captured per-queue error indication. */
	err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
	if (err & ERROR_QID_VALID_F) {
		dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
			ERROR_QID_G(err));
		if (err & UNCAPTURED_ERROR_F)
			dev_err(adapter->pdev_dev,
				"SGE UNCAPTURED_ERROR set (clearing)\n");
		t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
			     UNCAPTURED_ERROR_F);
	}

	if (v != 0)
		t4_fatal_err(adapter);
}
4559*4882a593Smuzhiyun
/* Aggregate masks covering all CIM Outbound/Inbound Queue parity-error
 * interrupt-cause bits, used by the CIM interrupt decode table.
 */
#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
4564*4882a593Smuzhiyun
4565*4882a593Smuzhiyun /*
4566*4882a593Smuzhiyun * CIM interrupt handler.
4567*4882a593Smuzhiyun */
static void cim_intr_handler(struct adapter *adapter)
{
	/* CIM host interrupt causes; all are treated as fatal. */
	static const struct intr_info cim_intr_info[] = {
		{ PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
		{ TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
		{ 0 }
	};
	/* CIM uP access-violation interrupt causes; all fatal. */
	static const struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT_F, "CIM illegal write", -1, 1 },
		{ ILLRDINT_F, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};

	u32 val, fw_err;
	int fat;

	/* Report any Firmware error recorded in PCIE_FW first. */
	fw_err = t4_read_reg(adapter, PCIE_FW_A);
	if (fw_err & PCIE_FW_ERR_F)
		t4_report_fw_error(adapter);

	/* When the Firmware detects an internal error which normally
	 * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
	 * in order to make sure the Host sees the Firmware Crash. So
	 * if we have a Timer0 interrupt and don't see a Firmware Crash,
	 * ignore the Timer0 interrupt.
	 */

	val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
	if (val & TIMER0INT_F)
		if (!(fw_err & PCIE_FW_ERR_F) ||
		    (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
			/* Spurious Timer0: clear it before the cause walk
			 * below so it isn't reported as a fatal event.
			 */
			t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
				     TIMER0INT_F);

	/* Walk both cause registers; non-zero means a fatal bit was seen. */
	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}
4641*4882a593Smuzhiyun
4642*4882a593Smuzhiyun /*
4643*4882a593Smuzhiyun * ULP RX interrupt handler.
4644*4882a593Smuzhiyun */
ulprx_intr_handler(struct adapter * adapter)4645*4882a593Smuzhiyun static void ulprx_intr_handler(struct adapter *adapter)
4646*4882a593Smuzhiyun {
4647*4882a593Smuzhiyun static const struct intr_info ulprx_intr_info[] = {
4648*4882a593Smuzhiyun { 0x1800000, "ULPRX context error", -1, 1 },
4649*4882a593Smuzhiyun { 0x7fffff, "ULPRX parity error", -1, 1 },
4650*4882a593Smuzhiyun { 0 }
4651*4882a593Smuzhiyun };
4652*4882a593Smuzhiyun
4653*4882a593Smuzhiyun if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
4654*4882a593Smuzhiyun t4_fatal_err(adapter);
4655*4882a593Smuzhiyun }
4656*4882a593Smuzhiyun
4657*4882a593Smuzhiyun /*
4658*4882a593Smuzhiyun * ULP TX interrupt handler.
4659*4882a593Smuzhiyun */
ulptx_intr_handler(struct adapter * adapter)4660*4882a593Smuzhiyun static void ulptx_intr_handler(struct adapter *adapter)
4661*4882a593Smuzhiyun {
4662*4882a593Smuzhiyun static const struct intr_info ulptx_intr_info[] = {
4663*4882a593Smuzhiyun { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
4664*4882a593Smuzhiyun 0 },
4665*4882a593Smuzhiyun { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
4666*4882a593Smuzhiyun 0 },
4667*4882a593Smuzhiyun { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
4668*4882a593Smuzhiyun 0 },
4669*4882a593Smuzhiyun { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
4670*4882a593Smuzhiyun 0 },
4671*4882a593Smuzhiyun { 0xfffffff, "ULPTX parity error", -1, 1 },
4672*4882a593Smuzhiyun { 0 }
4673*4882a593Smuzhiyun };
4674*4882a593Smuzhiyun
4675*4882a593Smuzhiyun if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
4676*4882a593Smuzhiyun t4_fatal_err(adapter);
4677*4882a593Smuzhiyun }
4678*4882a593Smuzhiyun
4679*4882a593Smuzhiyun /*
4680*4882a593Smuzhiyun * PM TX interrupt handler.
4681*4882a593Smuzhiyun */
pmtx_intr_handler(struct adapter * adapter)4682*4882a593Smuzhiyun static void pmtx_intr_handler(struct adapter *adapter)
4683*4882a593Smuzhiyun {
4684*4882a593Smuzhiyun static const struct intr_info pmtx_intr_info[] = {
4685*4882a593Smuzhiyun { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
4686*4882a593Smuzhiyun { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
4687*4882a593Smuzhiyun { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
4688*4882a593Smuzhiyun { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
4689*4882a593Smuzhiyun { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
4690*4882a593Smuzhiyun { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
4691*4882a593Smuzhiyun { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
4692*4882a593Smuzhiyun -1, 1 },
4693*4882a593Smuzhiyun { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
4694*4882a593Smuzhiyun { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
4695*4882a593Smuzhiyun { 0 }
4696*4882a593Smuzhiyun };
4697*4882a593Smuzhiyun
4698*4882a593Smuzhiyun if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
4699*4882a593Smuzhiyun t4_fatal_err(adapter);
4700*4882a593Smuzhiyun }
4701*4882a593Smuzhiyun
4702*4882a593Smuzhiyun /*
4703*4882a593Smuzhiyun * PM RX interrupt handler.
4704*4882a593Smuzhiyun */
pmrx_intr_handler(struct adapter * adapter)4705*4882a593Smuzhiyun static void pmrx_intr_handler(struct adapter *adapter)
4706*4882a593Smuzhiyun {
4707*4882a593Smuzhiyun static const struct intr_info pmrx_intr_info[] = {
4708*4882a593Smuzhiyun { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
4709*4882a593Smuzhiyun { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
4710*4882a593Smuzhiyun { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
4711*4882a593Smuzhiyun { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
4712*4882a593Smuzhiyun -1, 1 },
4713*4882a593Smuzhiyun { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
4714*4882a593Smuzhiyun { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
4715*4882a593Smuzhiyun { 0 }
4716*4882a593Smuzhiyun };
4717*4882a593Smuzhiyun
4718*4882a593Smuzhiyun if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
4719*4882a593Smuzhiyun t4_fatal_err(adapter);
4720*4882a593Smuzhiyun }
4721*4882a593Smuzhiyun
4722*4882a593Smuzhiyun /*
4723*4882a593Smuzhiyun * CPL switch interrupt handler.
4724*4882a593Smuzhiyun */
cplsw_intr_handler(struct adapter * adapter)4725*4882a593Smuzhiyun static void cplsw_intr_handler(struct adapter *adapter)
4726*4882a593Smuzhiyun {
4727*4882a593Smuzhiyun static const struct intr_info cplsw_intr_info[] = {
4728*4882a593Smuzhiyun { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
4729*4882a593Smuzhiyun { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
4730*4882a593Smuzhiyun { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
4731*4882a593Smuzhiyun { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
4732*4882a593Smuzhiyun { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
4733*4882a593Smuzhiyun { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
4734*4882a593Smuzhiyun { 0 }
4735*4882a593Smuzhiyun };
4736*4882a593Smuzhiyun
4737*4882a593Smuzhiyun if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
4738*4882a593Smuzhiyun t4_fatal_err(adapter);
4739*4882a593Smuzhiyun }
4740*4882a593Smuzhiyun
4741*4882a593Smuzhiyun /*
4742*4882a593Smuzhiyun * LE interrupt handler.
4743*4882a593Smuzhiyun */
le_intr_handler(struct adapter * adap)4744*4882a593Smuzhiyun static void le_intr_handler(struct adapter *adap)
4745*4882a593Smuzhiyun {
4746*4882a593Smuzhiyun enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
4747*4882a593Smuzhiyun static const struct intr_info le_intr_info[] = {
4748*4882a593Smuzhiyun { LIPMISS_F, "LE LIP miss", -1, 0 },
4749*4882a593Smuzhiyun { LIP0_F, "LE 0 LIP error", -1, 0 },
4750*4882a593Smuzhiyun { PARITYERR_F, "LE parity error", -1, 1 },
4751*4882a593Smuzhiyun { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4752*4882a593Smuzhiyun { REQQPARERR_F, "LE request queue parity error", -1, 1 },
4753*4882a593Smuzhiyun { 0 }
4754*4882a593Smuzhiyun };
4755*4882a593Smuzhiyun
4756*4882a593Smuzhiyun static struct intr_info t6_le_intr_info[] = {
4757*4882a593Smuzhiyun { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
4758*4882a593Smuzhiyun { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
4759*4882a593Smuzhiyun { CMDTIDERR_F, "LE cmd tid error", -1, 1 },
4760*4882a593Smuzhiyun { TCAMINTPERR_F, "LE parity error", -1, 1 },
4761*4882a593Smuzhiyun { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4762*4882a593Smuzhiyun { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
4763*4882a593Smuzhiyun { HASHTBLMEMCRCERR_F, "LE hash table mem crc error", -1, 0 },
4764*4882a593Smuzhiyun { 0 }
4765*4882a593Smuzhiyun };
4766*4882a593Smuzhiyun
4767*4882a593Smuzhiyun if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
4768*4882a593Smuzhiyun (chip <= CHELSIO_T5) ?
4769*4882a593Smuzhiyun le_intr_info : t6_le_intr_info))
4770*4882a593Smuzhiyun t4_fatal_err(adap);
4771*4882a593Smuzhiyun }
4772*4882a593Smuzhiyun
4773*4882a593Smuzhiyun /*
4774*4882a593Smuzhiyun * MPS interrupt handler.
4775*4882a593Smuzhiyun */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ BUBBLE_F, "MPS Tx underflow", -1, 1 },
		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	/* Same as mps_tx_intr_info minus BUBBLE_F, which is benign on T6. */
	static const struct intr_info t6_mps_tx_intr_info[] = {
		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		/* MPS Tx Bubble is normal for T6 */
		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
		  -1, 1 },
		{ MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	/* Walk every MPS sub-block cause register and accumulate the
	 * number of fatal conditions seen; the Tx table is chosen per
	 * chip generation (see t6_mps_tx_intr_info above).
	 */
	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
				    is_t6(adapter->params.chip)
				    ? t6_mps_tx_intr_info
				    : mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
				    mps_cls_intr_info);

	/* Clear the top-level MPS cause and flush the write before
	 * escalating, so the interrupt doesn't immediately re-fire.
	 */
	t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
	t4_read_reg(adapter, MPS_INT_CAUSE_A);                    /* flush */
	if (fat)
		t4_fatal_err(adapter);
}
4856*4882a593Smuzhiyun
4857*4882a593Smuzhiyun #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
4858*4882a593Smuzhiyun ECC_UE_INT_CAUSE_F)
4859*4882a593Smuzhiyun
4860*4882a593Smuzhiyun /*
4861*4882a593Smuzhiyun * EDC/MC interrupt handler.
4862*4882a593Smuzhiyun */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	/* Names indexed by @idx (MEM_EDC0/MEM_EDC1/MEM_MC/MEM_MC1). */
	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };

	unsigned int addr, cnt_addr, v;

	/* Select the cause and ECC-status registers for this memory:
	 * EDCs use per-instance EDC_REG offsets; the (first) MC register
	 * block moved between T4 and later chips; MC1 always uses the
	 * T5+ layout.
	 */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE_A, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
	} else if (idx == MEM_MC) {
		if (is_t4(adapter->params.chip)) {
			addr = MC_INT_CAUSE_A;
			cnt_addr = MC_ECC_STATUS_A;
		} else {
			addr = MC_P_INT_CAUSE_A;
			cnt_addr = MC_P_ECC_STATUS_A;
		}
	} else {
		addr = MC_REG(MC_P_INT_CAUSE_A, 1);
		cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
	}

	/* Only look at the cause bits this handler knows about. */
	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE_F)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE_F) {
		u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));

		t4_edc_err_read(adapter, idx);

		/* Reset the correctable-error counter after reading it. */
		t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE_F)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	/* Clear the handled cause bits; parity and uncorrectable ECC
	 * errors are fatal, correctable ECC alone is not.
	 */
	t4_write_reg(adapter, addr, v);
	if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
		t4_fatal_err(adapter);
}
4908*4882a593Smuzhiyun
4909*4882a593Smuzhiyun /*
4910*4882a593Smuzhiyun * MA interrupt handler.
4911*4882a593Smuzhiyun */
static void ma_intr_handler(struct adapter *adap)
{
	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);

	if (status & MEM_PERR_INT_CAUSE_F) {
		dev_alert(adap->pdev_dev,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
		/* T5 has a second parity-status register to report. */
		if (is_t5(adap->params.chip))
			dev_alert(adap->pdev_dev,
				  "MA parity error, parity status %#x\n",
				  t4_read_reg(adap,
					      MA_PARITY_ERROR_STATUS2_A));
	}
	if (status & MEM_WRAP_INT_CAUSE_F) {
		v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  MEM_WRAP_CLIENT_NUM_G(v),
			  MEM_WRAP_ADDRESS_G(v) << 4);
	}
	/* Clear whatever was pending; any MA interrupt is fatal. */
	t4_write_reg(adap, MA_INT_CAUSE_A, status);
	t4_fatal_err(adap);
}
4936*4882a593Smuzhiyun
4937*4882a593Smuzhiyun /*
4938*4882a593Smuzhiyun * SMB interrupt handler.
4939*4882a593Smuzhiyun */
smb_intr_handler(struct adapter * adap)4940*4882a593Smuzhiyun static void smb_intr_handler(struct adapter *adap)
4941*4882a593Smuzhiyun {
4942*4882a593Smuzhiyun static const struct intr_info smb_intr_info[] = {
4943*4882a593Smuzhiyun { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
4944*4882a593Smuzhiyun { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
4945*4882a593Smuzhiyun { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
4946*4882a593Smuzhiyun { 0 }
4947*4882a593Smuzhiyun };
4948*4882a593Smuzhiyun
4949*4882a593Smuzhiyun if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
4950*4882a593Smuzhiyun t4_fatal_err(adap);
4951*4882a593Smuzhiyun }
4952*4882a593Smuzhiyun
4953*4882a593Smuzhiyun /*
4954*4882a593Smuzhiyun * NC-SI interrupt handler.
4955*4882a593Smuzhiyun */
ncsi_intr_handler(struct adapter * adap)4956*4882a593Smuzhiyun static void ncsi_intr_handler(struct adapter *adap)
4957*4882a593Smuzhiyun {
4958*4882a593Smuzhiyun static const struct intr_info ncsi_intr_info[] = {
4959*4882a593Smuzhiyun { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
4960*4882a593Smuzhiyun { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
4961*4882a593Smuzhiyun { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
4962*4882a593Smuzhiyun { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
4963*4882a593Smuzhiyun { 0 }
4964*4882a593Smuzhiyun };
4965*4882a593Smuzhiyun
4966*4882a593Smuzhiyun if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
4967*4882a593Smuzhiyun t4_fatal_err(adap);
4968*4882a593Smuzhiyun }
4969*4882a593Smuzhiyun
4970*4882a593Smuzhiyun /*
4971*4882a593Smuzhiyun * XGMAC interrupt handler.
4972*4882a593Smuzhiyun */
xgmac_intr_handler(struct adapter * adap,int port)4973*4882a593Smuzhiyun static void xgmac_intr_handler(struct adapter *adap, int port)
4974*4882a593Smuzhiyun {
4975*4882a593Smuzhiyun u32 v, int_cause_reg;
4976*4882a593Smuzhiyun
4977*4882a593Smuzhiyun if (is_t4(adap->params.chip))
4978*4882a593Smuzhiyun int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
4979*4882a593Smuzhiyun else
4980*4882a593Smuzhiyun int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
4981*4882a593Smuzhiyun
4982*4882a593Smuzhiyun v = t4_read_reg(adap, int_cause_reg);
4983*4882a593Smuzhiyun
4984*4882a593Smuzhiyun v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
4985*4882a593Smuzhiyun if (!v)
4986*4882a593Smuzhiyun return;
4987*4882a593Smuzhiyun
4988*4882a593Smuzhiyun if (v & TXFIFO_PRTY_ERR_F)
4989*4882a593Smuzhiyun dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
4990*4882a593Smuzhiyun port);
4991*4882a593Smuzhiyun if (v & RXFIFO_PRTY_ERR_F)
4992*4882a593Smuzhiyun dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
4993*4882a593Smuzhiyun port);
4994*4882a593Smuzhiyun t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
4995*4882a593Smuzhiyun t4_fatal_err(adap);
4996*4882a593Smuzhiyun }
4997*4882a593Smuzhiyun
4998*4882a593Smuzhiyun /*
4999*4882a593Smuzhiyun * PL interrupt handler.
5000*4882a593Smuzhiyun */
pl_intr_handler(struct adapter * adap)5001*4882a593Smuzhiyun static void pl_intr_handler(struct adapter *adap)
5002*4882a593Smuzhiyun {
5003*4882a593Smuzhiyun static const struct intr_info pl_intr_info[] = {
5004*4882a593Smuzhiyun { FATALPERR_F, "T4 fatal parity error", -1, 1 },
5005*4882a593Smuzhiyun { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
5006*4882a593Smuzhiyun { 0 }
5007*4882a593Smuzhiyun };
5008*4882a593Smuzhiyun
5009*4882a593Smuzhiyun if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
5010*4882a593Smuzhiyun t4_fatal_err(adap);
5011*4882a593Smuzhiyun }
5012*4882a593Smuzhiyun
5013*4882a593Smuzhiyun #define PF_INTR_MASK (PFSW_F)
5014*4882a593Smuzhiyun #define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
5015*4882a593Smuzhiyun EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
5016*4882a593Smuzhiyun CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
5017*4882a593Smuzhiyun
5018*4882a593Smuzhiyun /**
5019*4882a593Smuzhiyun * t4_slow_intr_handler - control path interrupt handler
5020*4882a593Smuzhiyun * @adapter: the adapter
5021*4882a593Smuzhiyun *
5022*4882a593Smuzhiyun * T4 interrupt handler for non-data global interrupt events, e.g., errors.
5023*4882a593Smuzhiyun * The designation 'slow' is because it involves register reads, while
5024*4882a593Smuzhiyun * data interrupts typically don't involve any MMIOs.
5025*4882a593Smuzhiyun */
int t4_slow_intr_handler(struct adapter *adapter)
{
	/* There are rare cases where a PL_INT_CAUSE bit may end up getting
	 * set when the corresponding PL_INT_ENABLE bit isn't set. It's
	 * easiest just to mask that case here.
	 */
	u32 raw_cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
	u32 enable = t4_read_reg(adapter, PL_INT_ENABLE_A);
	u32 cause = raw_cause & enable;

	/* Nothing we service is pending: tell the caller it wasn't ours. */
	if (!(cause & GLBL_INTR_MASK))
		return 0;
	/* Dispatch each pending module interrupt to its handler. */
	if (cause & CIM_F)
		cim_intr_handler(adapter);
	if (cause & MPS_F)
		mps_intr_handler(adapter);
	if (cause & NCSI_F)
		ncsi_intr_handler(adapter);
	if (cause & PL_F)
		pl_intr_handler(adapter);
	if (cause & SMB_F)
		smb_intr_handler(adapter);
	if (cause & XGMAC0_F)
		xgmac_intr_handler(adapter, 0);
	if (cause & XGMAC1_F)
		xgmac_intr_handler(adapter, 1);
	if (cause & XGMAC_KR0_F)
		xgmac_intr_handler(adapter, 2);
	if (cause & XGMAC_KR1_F)
		xgmac_intr_handler(adapter, 3);
	if (cause & PCIE_F)
		pcie_intr_handler(adapter);
	if (cause & MC_F)
		mem_intr_handler(adapter, MEM_MC);
	/* Only T5 has a second MC. */
	if (is_t5(adapter->params.chip) && (cause & MC1_F))
		mem_intr_handler(adapter, MEM_MC1);
	if (cause & EDC0_F)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & EDC1_F)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & LE_F)
		le_intr_handler(adapter);
	if (cause & TP_F)
		tp_intr_handler(adapter);
	if (cause & MA_F)
		ma_intr_handler(adapter);
	if (cause & PM_TX_F)
		pmtx_intr_handler(adapter);
	if (cause & PM_RX_F)
		pmrx_intr_handler(adapter);
	if (cause & ULP_RX_F)
		ulprx_intr_handler(adapter);
	if (cause & CPL_SWITCH_F)
		cplsw_intr_handler(adapter);
	if (cause & SGE_F)
		sge_intr_handler(adapter);
	if (cause & ULP_TX_F)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, PL_INT_CAUSE_A, raw_cause & GLBL_INTR_MASK);
	(void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
	return 1;
}
5090*4882a593Smuzhiyun
5091*4882a593Smuzhiyun /**
5092*4882a593Smuzhiyun * t4_intr_enable - enable interrupts
5093*4882a593Smuzhiyun * @adapter: the adapter whose interrupts should be enabled
5094*4882a593Smuzhiyun *
5095*4882a593Smuzhiyun * Enable PF-specific interrupts for the calling function and the top-level
5096*4882a593Smuzhiyun * interrupt concentrator for global interrupts. Interrupts are already
5097*4882a593Smuzhiyun * enabled at each module, here we just enable the roots of the interrupt
5098*4882a593Smuzhiyun * hierarchies.
5099*4882a593Smuzhiyun *
5100*4882a593Smuzhiyun * Note: this function should be called only when the driver manages
5101*4882a593Smuzhiyun * non PF-specific interrupts from the various HW modules. Only one PCI
5102*4882a593Smuzhiyun * function at a time should be doing this.
5103*4882a593Smuzhiyun */
void t4_intr_enable(struct adapter *adapter)
{
	u32 val = 0;
	u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
	/* The PF field of PL_WHOAMI moved on T6. */
	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);

	/* Some SGE error interrupts only exist on T4/T5. */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
	t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
		     ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
		     ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
		     ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
		     ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
		     ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
		     DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
	/* Enable PF-local interrupts and route global interrupts to us. */
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
	t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
}
5123*4882a593Smuzhiyun
5124*4882a593Smuzhiyun /**
5125*4882a593Smuzhiyun * t4_intr_disable - disable interrupts
5126*4882a593Smuzhiyun * @adapter: the adapter whose interrupts should be disabled
5127*4882a593Smuzhiyun *
5128*4882a593Smuzhiyun * Disable interrupts. We only disable the top-level interrupt
5129*4882a593Smuzhiyun * concentrators. The caller must be a PCI function managing global
5130*4882a593Smuzhiyun * interrupts.
5131*4882a593Smuzhiyun */
t4_intr_disable(struct adapter * adapter)5132*4882a593Smuzhiyun void t4_intr_disable(struct adapter *adapter)
5133*4882a593Smuzhiyun {
5134*4882a593Smuzhiyun u32 whoami, pf;
5135*4882a593Smuzhiyun
5136*4882a593Smuzhiyun if (pci_channel_offline(adapter->pdev))
5137*4882a593Smuzhiyun return;
5138*4882a593Smuzhiyun
5139*4882a593Smuzhiyun whoami = t4_read_reg(adapter, PL_WHOAMI_A);
5140*4882a593Smuzhiyun pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
5141*4882a593Smuzhiyun SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
5142*4882a593Smuzhiyun
5143*4882a593Smuzhiyun t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
5144*4882a593Smuzhiyun t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
5145*4882a593Smuzhiyun }
5146*4882a593Smuzhiyun
t4_chip_rss_size(struct adapter * adap)5147*4882a593Smuzhiyun unsigned int t4_chip_rss_size(struct adapter *adap)
5148*4882a593Smuzhiyun {
5149*4882a593Smuzhiyun if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
5150*4882a593Smuzhiyun return RSS_NENTRIES;
5151*4882a593Smuzhiyun else
5152*4882a593Smuzhiyun return T6_RSS_NENTRIES;
5153*4882a593Smuzhiyun }
5154*4882a593Smuzhiyun
5155*4882a593Smuzhiyun /**
5156*4882a593Smuzhiyun * t4_config_rss_range - configure a portion of the RSS mapping table
5157*4882a593Smuzhiyun * @adapter: the adapter
5158*4882a593Smuzhiyun * @mbox: mbox to use for the FW command
5159*4882a593Smuzhiyun * @viid: virtual interface whose RSS subtable is to be written
5160*4882a593Smuzhiyun * @start: start entry in the table to write
5161*4882a593Smuzhiyun * @n: how many table entries to write
5162*4882a593Smuzhiyun * @rspq: values for the response queue lookup table
5163*4882a593Smuzhiyun * @nrspq: number of values in @rspq
5164*4882a593Smuzhiyun *
5165*4882a593Smuzhiyun * Programs the selected part of the VI's RSS mapping table with the
5166*4882a593Smuzhiyun * provided values. If @nrspq < @n the supplied values are used repeatedly
5167*4882a593Smuzhiyun * until the full table range is populated.
5168*4882a593Smuzhiyun *
5169*4882a593Smuzhiyun * The caller must ensure the values in @rspq are in the range allowed for
5170*4882a593Smuzhiyun * @viid.
5171*4882a593Smuzhiyun */
t4_config_rss_range(struct adapter * adapter,int mbox,unsigned int viid,int start,int n,const u16 * rspq,unsigned int nrspq)5172*4882a593Smuzhiyun int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
5173*4882a593Smuzhiyun int start, int n, const u16 *rspq, unsigned int nrspq)
5174*4882a593Smuzhiyun {
5175*4882a593Smuzhiyun int ret;
5176*4882a593Smuzhiyun const u16 *rsp = rspq;
5177*4882a593Smuzhiyun const u16 *rsp_end = rspq + nrspq;
5178*4882a593Smuzhiyun struct fw_rss_ind_tbl_cmd cmd;
5179*4882a593Smuzhiyun
5180*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
5181*4882a593Smuzhiyun cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
5182*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5183*4882a593Smuzhiyun FW_RSS_IND_TBL_CMD_VIID_V(viid));
5184*4882a593Smuzhiyun cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5185*4882a593Smuzhiyun
5186*4882a593Smuzhiyun /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
5187*4882a593Smuzhiyun while (n > 0) {
5188*4882a593Smuzhiyun int nq = min(n, 32);
5189*4882a593Smuzhiyun __be32 *qp = &cmd.iq0_to_iq2;
5190*4882a593Smuzhiyun
5191*4882a593Smuzhiyun cmd.niqid = cpu_to_be16(nq);
5192*4882a593Smuzhiyun cmd.startidx = cpu_to_be16(start);
5193*4882a593Smuzhiyun
5194*4882a593Smuzhiyun start += nq;
5195*4882a593Smuzhiyun n -= nq;
5196*4882a593Smuzhiyun
5197*4882a593Smuzhiyun while (nq > 0) {
5198*4882a593Smuzhiyun unsigned int v;
5199*4882a593Smuzhiyun
5200*4882a593Smuzhiyun v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
5201*4882a593Smuzhiyun if (++rsp >= rsp_end)
5202*4882a593Smuzhiyun rsp = rspq;
5203*4882a593Smuzhiyun v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
5204*4882a593Smuzhiyun if (++rsp >= rsp_end)
5205*4882a593Smuzhiyun rsp = rspq;
5206*4882a593Smuzhiyun v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
5207*4882a593Smuzhiyun if (++rsp >= rsp_end)
5208*4882a593Smuzhiyun rsp = rspq;
5209*4882a593Smuzhiyun
5210*4882a593Smuzhiyun *qp++ = cpu_to_be32(v);
5211*4882a593Smuzhiyun nq -= 3;
5212*4882a593Smuzhiyun }
5213*4882a593Smuzhiyun
5214*4882a593Smuzhiyun ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
5215*4882a593Smuzhiyun if (ret)
5216*4882a593Smuzhiyun return ret;
5217*4882a593Smuzhiyun }
5218*4882a593Smuzhiyun return 0;
5219*4882a593Smuzhiyun }
5220*4882a593Smuzhiyun
5221*4882a593Smuzhiyun /**
5222*4882a593Smuzhiyun * t4_config_glbl_rss - configure the global RSS mode
5223*4882a593Smuzhiyun * @adapter: the adapter
5224*4882a593Smuzhiyun * @mbox: mbox to use for the FW command
5225*4882a593Smuzhiyun * @mode: global RSS mode
5226*4882a593Smuzhiyun * @flags: mode-specific flags
5227*4882a593Smuzhiyun *
5228*4882a593Smuzhiyun * Sets the global RSS mode.
5229*4882a593Smuzhiyun */
t4_config_glbl_rss(struct adapter * adapter,int mbox,unsigned int mode,unsigned int flags)5230*4882a593Smuzhiyun int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5231*4882a593Smuzhiyun unsigned int flags)
5232*4882a593Smuzhiyun {
5233*4882a593Smuzhiyun struct fw_rss_glb_config_cmd c;
5234*4882a593Smuzhiyun
5235*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
5236*4882a593Smuzhiyun c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
5237*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
5238*4882a593Smuzhiyun c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5239*4882a593Smuzhiyun if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5240*4882a593Smuzhiyun c.u.manual.mode_pkd =
5241*4882a593Smuzhiyun cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5242*4882a593Smuzhiyun } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5243*4882a593Smuzhiyun c.u.basicvirtual.mode_pkd =
5244*4882a593Smuzhiyun cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5245*4882a593Smuzhiyun c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5246*4882a593Smuzhiyun } else
5247*4882a593Smuzhiyun return -EINVAL;
5248*4882a593Smuzhiyun return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5249*4882a593Smuzhiyun }
5250*4882a593Smuzhiyun
5251*4882a593Smuzhiyun /**
5252*4882a593Smuzhiyun * t4_config_vi_rss - configure per VI RSS settings
5253*4882a593Smuzhiyun * @adapter: the adapter
5254*4882a593Smuzhiyun * @mbox: mbox to use for the FW command
5255*4882a593Smuzhiyun * @viid: the VI id
5256*4882a593Smuzhiyun * @flags: RSS flags
5257*4882a593Smuzhiyun * @defq: id of the default RSS queue for the VI.
5258*4882a593Smuzhiyun *
5259*4882a593Smuzhiyun * Configures VI-specific RSS properties.
5260*4882a593Smuzhiyun */
t4_config_vi_rss(struct adapter * adapter,int mbox,unsigned int viid,unsigned int flags,unsigned int defq)5261*4882a593Smuzhiyun int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5262*4882a593Smuzhiyun unsigned int flags, unsigned int defq)
5263*4882a593Smuzhiyun {
5264*4882a593Smuzhiyun struct fw_rss_vi_config_cmd c;
5265*4882a593Smuzhiyun
5266*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
5267*4882a593Smuzhiyun c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
5268*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5269*4882a593Smuzhiyun FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
5270*4882a593Smuzhiyun c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5271*4882a593Smuzhiyun c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5272*4882a593Smuzhiyun FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
5273*4882a593Smuzhiyun return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5274*4882a593Smuzhiyun }
5275*4882a593Smuzhiyun
5276*4882a593Smuzhiyun /* Read an RSS table row */
rd_rss_row(struct adapter * adap,int row,u32 * val)5277*4882a593Smuzhiyun static int rd_rss_row(struct adapter *adap, int row, u32 *val)
5278*4882a593Smuzhiyun {
5279*4882a593Smuzhiyun t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
5280*4882a593Smuzhiyun return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
5281*4882a593Smuzhiyun 5, 0, val);
5282*4882a593Smuzhiyun }
5283*4882a593Smuzhiyun
5284*4882a593Smuzhiyun /**
5285*4882a593Smuzhiyun * t4_read_rss - read the contents of the RSS mapping table
5286*4882a593Smuzhiyun * @adapter: the adapter
5287*4882a593Smuzhiyun * @map: holds the contents of the RSS mapping table
5288*4882a593Smuzhiyun *
5289*4882a593Smuzhiyun * Reads the contents of the RSS hash->queue mapping table.
5290*4882a593Smuzhiyun */
t4_read_rss(struct adapter * adapter,u16 * map)5291*4882a593Smuzhiyun int t4_read_rss(struct adapter *adapter, u16 *map)
5292*4882a593Smuzhiyun {
5293*4882a593Smuzhiyun int i, ret, nentries;
5294*4882a593Smuzhiyun u32 val;
5295*4882a593Smuzhiyun
5296*4882a593Smuzhiyun nentries = t4_chip_rss_size(adapter);
5297*4882a593Smuzhiyun for (i = 0; i < nentries / 2; ++i) {
5298*4882a593Smuzhiyun ret = rd_rss_row(adapter, i, &val);
5299*4882a593Smuzhiyun if (ret)
5300*4882a593Smuzhiyun return ret;
5301*4882a593Smuzhiyun *map++ = LKPTBLQUEUE0_G(val);
5302*4882a593Smuzhiyun *map++ = LKPTBLQUEUE1_G(val);
5303*4882a593Smuzhiyun }
5304*4882a593Smuzhiyun return 0;
5305*4882a593Smuzhiyun }
5306*4882a593Smuzhiyun
t4_use_ldst(struct adapter * adap)5307*4882a593Smuzhiyun static unsigned int t4_use_ldst(struct adapter *adap)
5308*4882a593Smuzhiyun {
5309*4882a593Smuzhiyun return (adap->flags & CXGB4_FW_OK) && !adap->use_bd;
5310*4882a593Smuzhiyun }
5311*4882a593Smuzhiyun
5312*4882a593Smuzhiyun /**
5313*4882a593Smuzhiyun * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5314*4882a593Smuzhiyun * @adap: the adapter
5315*4882a593Smuzhiyun * @cmd: TP fw ldst address space type
5316*4882a593Smuzhiyun * @vals: where the indirect register values are stored/written
5317*4882a593Smuzhiyun * @nregs: how many indirect registers to read/write
5318*4882a593Smuzhiyun * @start_index: index of first indirect register to read/write
5319*4882a593Smuzhiyun * @rw: Read (1) or Write (0)
5320*4882a593Smuzhiyun * @sleep_ok: if true we may sleep while awaiting command completion
5321*4882a593Smuzhiyun *
5322*4882a593Smuzhiyun * Access TP indirect registers through LDST
5323*4882a593Smuzhiyun */
t4_tp_fw_ldst_rw(struct adapter * adap,int cmd,u32 * vals,unsigned int nregs,unsigned int start_index,unsigned int rw,bool sleep_ok)5324*4882a593Smuzhiyun static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5325*4882a593Smuzhiyun unsigned int nregs, unsigned int start_index,
5326*4882a593Smuzhiyun unsigned int rw, bool sleep_ok)
5327*4882a593Smuzhiyun {
5328*4882a593Smuzhiyun int ret = 0;
5329*4882a593Smuzhiyun unsigned int i;
5330*4882a593Smuzhiyun struct fw_ldst_cmd c;
5331*4882a593Smuzhiyun
5332*4882a593Smuzhiyun for (i = 0; i < nregs; i++) {
5333*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
5334*4882a593Smuzhiyun c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5335*4882a593Smuzhiyun FW_CMD_REQUEST_F |
5336*4882a593Smuzhiyun (rw ? FW_CMD_READ_F :
5337*4882a593Smuzhiyun FW_CMD_WRITE_F) |
5338*4882a593Smuzhiyun FW_LDST_CMD_ADDRSPACE_V(cmd));
5339*4882a593Smuzhiyun c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5340*4882a593Smuzhiyun
5341*4882a593Smuzhiyun c.u.addrval.addr = cpu_to_be32(start_index + i);
5342*4882a593Smuzhiyun c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
5343*4882a593Smuzhiyun ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
5344*4882a593Smuzhiyun sleep_ok);
5345*4882a593Smuzhiyun if (ret)
5346*4882a593Smuzhiyun return ret;
5347*4882a593Smuzhiyun
5348*4882a593Smuzhiyun if (rw)
5349*4882a593Smuzhiyun vals[i] = be32_to_cpu(c.u.addrval.val);
5350*4882a593Smuzhiyun }
5351*4882a593Smuzhiyun return 0;
5352*4882a593Smuzhiyun }
5353*4882a593Smuzhiyun
5354*4882a593Smuzhiyun /**
5355*4882a593Smuzhiyun * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5356*4882a593Smuzhiyun * @adap: the adapter
5357*4882a593Smuzhiyun * @reg_addr: Address Register
5358*4882a593Smuzhiyun * @reg_data: Data register
5359*4882a593Smuzhiyun * @buff: where the indirect register values are stored/written
5360*4882a593Smuzhiyun * @nregs: how many indirect registers to read/write
5361*4882a593Smuzhiyun * @start_index: index of first indirect register to read/write
5362*4882a593Smuzhiyun * @rw: READ(1) or WRITE(0)
5363*4882a593Smuzhiyun * @sleep_ok: if true we may sleep while awaiting command completion
5364*4882a593Smuzhiyun *
5365*4882a593Smuzhiyun * Read/Write TP indirect registers through LDST if possible.
5366*4882a593Smuzhiyun * Else, use backdoor access
5367*4882a593Smuzhiyun **/
t4_tp_indirect_rw(struct adapter * adap,u32 reg_addr,u32 reg_data,u32 * buff,u32 nregs,u32 start_index,int rw,bool sleep_ok)5368*4882a593Smuzhiyun static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5369*4882a593Smuzhiyun u32 *buff, u32 nregs, u32 start_index, int rw,
5370*4882a593Smuzhiyun bool sleep_ok)
5371*4882a593Smuzhiyun {
5372*4882a593Smuzhiyun int rc = -EINVAL;
5373*4882a593Smuzhiyun int cmd;
5374*4882a593Smuzhiyun
5375*4882a593Smuzhiyun switch (reg_addr) {
5376*4882a593Smuzhiyun case TP_PIO_ADDR_A:
5377*4882a593Smuzhiyun cmd = FW_LDST_ADDRSPC_TP_PIO;
5378*4882a593Smuzhiyun break;
5379*4882a593Smuzhiyun case TP_TM_PIO_ADDR_A:
5380*4882a593Smuzhiyun cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5381*4882a593Smuzhiyun break;
5382*4882a593Smuzhiyun case TP_MIB_INDEX_A:
5383*4882a593Smuzhiyun cmd = FW_LDST_ADDRSPC_TP_MIB;
5384*4882a593Smuzhiyun break;
5385*4882a593Smuzhiyun default:
5386*4882a593Smuzhiyun goto indirect_access;
5387*4882a593Smuzhiyun }
5388*4882a593Smuzhiyun
5389*4882a593Smuzhiyun if (t4_use_ldst(adap))
5390*4882a593Smuzhiyun rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
5391*4882a593Smuzhiyun sleep_ok);
5392*4882a593Smuzhiyun
5393*4882a593Smuzhiyun indirect_access:
5394*4882a593Smuzhiyun
5395*4882a593Smuzhiyun if (rc) {
5396*4882a593Smuzhiyun if (rw)
5397*4882a593Smuzhiyun t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5398*4882a593Smuzhiyun start_index);
5399*4882a593Smuzhiyun else
5400*4882a593Smuzhiyun t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5401*4882a593Smuzhiyun start_index);
5402*4882a593Smuzhiyun }
5403*4882a593Smuzhiyun }
5404*4882a593Smuzhiyun
/**
 * t4_tp_pio_read - Read TP PIO registers
 * @adap: the adapter
 * @buff: where the indirect register values are written
 * @nregs: how many indirect registers to read
 * @start_index: index of first indirect register to read
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Read TP PIO Registers via LDST when available, otherwise via the
 * backdoor TP_PIO_ADDR/DATA pair (handled by t4_tp_indirect_rw).
 **/
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		    u32 start_index, bool sleep_ok)
{
	/* rw = 1 selects a read. */
	t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
			  start_index, 1, sleep_ok);
}
5421*4882a593Smuzhiyun
/**
 * t4_tp_pio_write - Write TP PIO registers
 * @adap: the adapter
 * @buff: where the indirect register values are stored
 * @nregs: how many indirect registers to write
 * @start_index: index of first indirect register to write
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Write TP PIO Registers via LDST when available, otherwise via the
 * backdoor TP_PIO_ADDR/DATA pair (handled by t4_tp_indirect_rw).
 **/
static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
			    u32 start_index, bool sleep_ok)
{
	/* rw = 0 selects a write. */
	t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
			  start_index, 0, sleep_ok);
}
5438*4882a593Smuzhiyun
/**
 * t4_tp_tm_pio_read - Read TP TM PIO registers
 * @adap: the adapter
 * @buff: where the indirect register values are written
 * @nregs: how many indirect registers to read
 * @start_index: index of first indirect register to read
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Read TP TM PIO Registers via LDST when available, otherwise via the
 * backdoor TP_TM_PIO_ADDR/DATA pair (handled by t4_tp_indirect_rw).
 **/
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		       u32 start_index, bool sleep_ok)
{
	/* rw = 1 selects a read. */
	t4_tp_indirect_rw(adap, TP_TM_PIO_ADDR_A, TP_TM_PIO_DATA_A, buff,
			  nregs, start_index, 1, sleep_ok);
}
5455*4882a593Smuzhiyun
/**
 * t4_tp_mib_read - Read TP MIB registers
 * @adap: the adapter
 * @buff: where the indirect register values are written
 * @nregs: how many indirect registers to read
 * @start_index: index of first indirect register to read
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Read TP MIB Registers via LDST when available, otherwise via the
 * backdoor TP_MIB_INDEX/DATA pair (handled by t4_tp_indirect_rw).
 **/
void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
		    bool sleep_ok)
{
	/* rw = 1 selects a read. */
	t4_tp_indirect_rw(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, buff, nregs,
			  start_index, 1, sleep_ok);
}
5472*4882a593Smuzhiyun
/**
 * t4_read_rss_key - read the global RSS key
 * @adap: the adapter
 * @key: 10-entry array holding the 320-bit RSS key
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Reads the global 320-bit RSS key (ten 32-bit TP PIO registers
 * starting at TP_RSS_SECRET_KEY0_A).
 */
void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
{
	t4_tp_pio_read(adap, key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
}
5485*4882a593Smuzhiyun
5486*4882a593Smuzhiyun /**
5487*4882a593Smuzhiyun * t4_write_rss_key - program one of the RSS keys
5488*4882a593Smuzhiyun * @adap: the adapter
5489*4882a593Smuzhiyun * @key: 10-entry array holding the 320-bit RSS key
5490*4882a593Smuzhiyun * @idx: which RSS key to write
5491*4882a593Smuzhiyun * @sleep_ok: if true we may sleep while awaiting command completion
5492*4882a593Smuzhiyun *
5493*4882a593Smuzhiyun * Writes one of the RSS keys with the given 320-bit value. If @idx is
5494*4882a593Smuzhiyun * 0..15 the corresponding entry in the RSS key table is written,
5495*4882a593Smuzhiyun * otherwise the global RSS key is written.
5496*4882a593Smuzhiyun */
t4_write_rss_key(struct adapter * adap,const u32 * key,int idx,bool sleep_ok)5497*4882a593Smuzhiyun void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5498*4882a593Smuzhiyun bool sleep_ok)
5499*4882a593Smuzhiyun {
5500*4882a593Smuzhiyun u8 rss_key_addr_cnt = 16;
5501*4882a593Smuzhiyun u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
5502*4882a593Smuzhiyun
5503*4882a593Smuzhiyun /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5504*4882a593Smuzhiyun * allows access to key addresses 16-63 by using KeyWrAddrX
5505*4882a593Smuzhiyun * as index[5:4](upper 2) into key table
5506*4882a593Smuzhiyun */
5507*4882a593Smuzhiyun if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
5508*4882a593Smuzhiyun (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
5509*4882a593Smuzhiyun rss_key_addr_cnt = 32;
5510*4882a593Smuzhiyun
5511*4882a593Smuzhiyun t4_tp_pio_write(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
5512*4882a593Smuzhiyun
5513*4882a593Smuzhiyun if (idx >= 0 && idx < rss_key_addr_cnt) {
5514*4882a593Smuzhiyun if (rss_key_addr_cnt > 16)
5515*4882a593Smuzhiyun t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5516*4882a593Smuzhiyun KEYWRADDRX_V(idx >> 4) |
5517*4882a593Smuzhiyun T6_VFWRADDR_V(idx) | KEYWREN_F);
5518*4882a593Smuzhiyun else
5519*4882a593Smuzhiyun t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5520*4882a593Smuzhiyun KEYWRADDR_V(idx) | KEYWREN_F);
5521*4882a593Smuzhiyun }
5522*4882a593Smuzhiyun }
5523*4882a593Smuzhiyun
/**
 * t4_read_rss_pf_config - read PF RSS Configuration Table
 * @adapter: the adapter
 * @index: the entry in the PF RSS table to read
 * @valp: where to store the returned value
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Reads the PF RSS Configuration Table at the specified index and returns
 * the value found there.  The PF config registers are laid out
 * consecutively from TP_RSS_PF0_CONFIG_A, so @index selects the PF.
 */
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp, bool sleep_ok)
{
	t4_tp_pio_read(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, sleep_ok);
}
5539*4882a593Smuzhiyun
5540*4882a593Smuzhiyun /**
5541*4882a593Smuzhiyun * t4_read_rss_vf_config - read VF RSS Configuration Table
5542*4882a593Smuzhiyun * @adapter: the adapter
5543*4882a593Smuzhiyun * @index: the entry in the VF RSS table to read
5544*4882a593Smuzhiyun * @vfl: where to store the returned VFL
5545*4882a593Smuzhiyun * @vfh: where to store the returned VFH
5546*4882a593Smuzhiyun * @sleep_ok: if true we may sleep while awaiting command completion
5547*4882a593Smuzhiyun *
5548*4882a593Smuzhiyun * Reads the VF RSS Configuration Table at the specified index and returns
5549*4882a593Smuzhiyun * the (VFL, VFH) values found there.
5550*4882a593Smuzhiyun */
t4_read_rss_vf_config(struct adapter * adapter,unsigned int index,u32 * vfl,u32 * vfh,bool sleep_ok)5551*4882a593Smuzhiyun void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5552*4882a593Smuzhiyun u32 *vfl, u32 *vfh, bool sleep_ok)
5553*4882a593Smuzhiyun {
5554*4882a593Smuzhiyun u32 vrt, mask, data;
5555*4882a593Smuzhiyun
5556*4882a593Smuzhiyun if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
5557*4882a593Smuzhiyun mask = VFWRADDR_V(VFWRADDR_M);
5558*4882a593Smuzhiyun data = VFWRADDR_V(index);
5559*4882a593Smuzhiyun } else {
5560*4882a593Smuzhiyun mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
5561*4882a593Smuzhiyun data = T6_VFWRADDR_V(index);
5562*4882a593Smuzhiyun }
5563*4882a593Smuzhiyun
5564*4882a593Smuzhiyun /* Request that the index'th VF Table values be read into VFL/VFH.
5565*4882a593Smuzhiyun */
5566*4882a593Smuzhiyun vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
5567*4882a593Smuzhiyun vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
5568*4882a593Smuzhiyun vrt |= data | VFRDEN_F;
5569*4882a593Smuzhiyun t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
5570*4882a593Smuzhiyun
5571*4882a593Smuzhiyun /* Grab the VFL/VFH values ...
5572*4882a593Smuzhiyun */
5573*4882a593Smuzhiyun t4_tp_pio_read(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, sleep_ok);
5574*4882a593Smuzhiyun t4_tp_pio_read(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, sleep_ok);
5575*4882a593Smuzhiyun }
5576*4882a593Smuzhiyun
5577*4882a593Smuzhiyun /**
5578*4882a593Smuzhiyun * t4_read_rss_pf_map - read PF RSS Map
5579*4882a593Smuzhiyun * @adapter: the adapter
5580*4882a593Smuzhiyun * @sleep_ok: if true we may sleep while awaiting command completion
5581*4882a593Smuzhiyun *
5582*4882a593Smuzhiyun * Reads the PF RSS Map register and returns its value.
5583*4882a593Smuzhiyun */
t4_read_rss_pf_map(struct adapter * adapter,bool sleep_ok)5584*4882a593Smuzhiyun u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
5585*4882a593Smuzhiyun {
5586*4882a593Smuzhiyun u32 pfmap;
5587*4882a593Smuzhiyun
5588*4882a593Smuzhiyun t4_tp_pio_read(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, sleep_ok);
5589*4882a593Smuzhiyun return pfmap;
5590*4882a593Smuzhiyun }
5591*4882a593Smuzhiyun
5592*4882a593Smuzhiyun /**
5593*4882a593Smuzhiyun * t4_read_rss_pf_mask - read PF RSS Mask
5594*4882a593Smuzhiyun * @adapter: the adapter
5595*4882a593Smuzhiyun * @sleep_ok: if true we may sleep while awaiting command completion
5596*4882a593Smuzhiyun *
5597*4882a593Smuzhiyun * Reads the PF RSS Mask register and returns its value.
5598*4882a593Smuzhiyun */
t4_read_rss_pf_mask(struct adapter * adapter,bool sleep_ok)5599*4882a593Smuzhiyun u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
5600*4882a593Smuzhiyun {
5601*4882a593Smuzhiyun u32 pfmask;
5602*4882a593Smuzhiyun
5603*4882a593Smuzhiyun t4_tp_pio_read(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, sleep_ok);
5604*4882a593Smuzhiyun return pfmask;
5605*4882a593Smuzhiyun }
5606*4882a593Smuzhiyun
/**
 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
 * @adap: the adapter
 * @v4: holds the TCP/IP counter values
 * @v6: holds the TCP/IPv6 counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6, bool sleep_ok)
{
	/* Scratch buffer sized to hold the contiguous OUT_RST..RXT_SEG_LO
	 * register range; the same buffer is reused for the v4 and v6
	 * register blocks.
	 */
	u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];

/* Index of counter x relative to the start of the block just read */
#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
#define STAT(x)     val[STAT_IDX(x)]
/* Combine the _HI/_LO register pair of counter x into one 64-bit value */
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       TP_MIB_TCP_OUT_RST_A, sleep_ok);
		v4->tcp_out_rsts = STAT(OUT_RST);
		v4->tcp_in_segs = STAT64(IN_SEG);
		v4->tcp_out_segs = STAT64(OUT_SEG);
		v4->tcp_retrans_segs = STAT64(RXT_SEG);
	}
	if (v6) {
		/* The v6 block has the same layout, offset at V6OUT_RST,
		 * so the same STAT indices apply.
		 */
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       TP_MIB_TCP_V6OUT_RST_A, sleep_ok);
		v6->tcp_out_rsts = STAT(OUT_RST);
		v6->tcp_in_segs = STAT64(IN_SEG);
		v6->tcp_out_segs = STAT64(OUT_SEG);
		v6->tcp_retrans_segs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
5646*4882a593Smuzhiyun
5647*4882a593Smuzhiyun /**
5648*4882a593Smuzhiyun * t4_tp_get_err_stats - read TP's error MIB counters
5649*4882a593Smuzhiyun * @adap: the adapter
5650*4882a593Smuzhiyun * @st: holds the counter values
5651*4882a593Smuzhiyun * @sleep_ok: if true we may sleep while awaiting command completion
5652*4882a593Smuzhiyun *
5653*4882a593Smuzhiyun * Returns the values of TP's error counters.
5654*4882a593Smuzhiyun */
t4_tp_get_err_stats(struct adapter * adap,struct tp_err_stats * st,bool sleep_ok)5655*4882a593Smuzhiyun void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
5656*4882a593Smuzhiyun bool sleep_ok)
5657*4882a593Smuzhiyun {
5658*4882a593Smuzhiyun int nchan = adap->params.arch.nchan;
5659*4882a593Smuzhiyun
5660*4882a593Smuzhiyun t4_tp_mib_read(adap, st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A,
5661*4882a593Smuzhiyun sleep_ok);
5662*4882a593Smuzhiyun t4_tp_mib_read(adap, st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A,
5663*4882a593Smuzhiyun sleep_ok);
5664*4882a593Smuzhiyun t4_tp_mib_read(adap, st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A,
5665*4882a593Smuzhiyun sleep_ok);
5666*4882a593Smuzhiyun t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5667*4882a593Smuzhiyun TP_MIB_TNL_CNG_DROP_0_A, sleep_ok);
5668*4882a593Smuzhiyun t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5669*4882a593Smuzhiyun TP_MIB_OFD_CHN_DROP_0_A, sleep_ok);
5670*4882a593Smuzhiyun t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A,
5671*4882a593Smuzhiyun sleep_ok);
5672*4882a593Smuzhiyun t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
5673*4882a593Smuzhiyun TP_MIB_OFD_VLN_DROP_0_A, sleep_ok);
5674*4882a593Smuzhiyun t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
5675*4882a593Smuzhiyun TP_MIB_TCP_V6IN_ERR_0_A, sleep_ok);
5676*4882a593Smuzhiyun t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A,
5677*4882a593Smuzhiyun sleep_ok);
5678*4882a593Smuzhiyun }
5679*4882a593Smuzhiyun
5680*4882a593Smuzhiyun /**
5681*4882a593Smuzhiyun * t4_tp_get_cpl_stats - read TP's CPL MIB counters
5682*4882a593Smuzhiyun * @adap: the adapter
5683*4882a593Smuzhiyun * @st: holds the counter values
5684*4882a593Smuzhiyun * @sleep_ok: if true we may sleep while awaiting command completion
5685*4882a593Smuzhiyun *
5686*4882a593Smuzhiyun * Returns the values of TP's CPL counters.
5687*4882a593Smuzhiyun */
t4_tp_get_cpl_stats(struct adapter * adap,struct tp_cpl_stats * st,bool sleep_ok)5688*4882a593Smuzhiyun void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
5689*4882a593Smuzhiyun bool sleep_ok)
5690*4882a593Smuzhiyun {
5691*4882a593Smuzhiyun int nchan = adap->params.arch.nchan;
5692*4882a593Smuzhiyun
5693*4882a593Smuzhiyun t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok);
5694*4882a593Smuzhiyun
5695*4882a593Smuzhiyun t4_tp_mib_read(adap, st->rsp, nchan, TP_MIB_CPL_OUT_RSP_0_A, sleep_ok);
5696*4882a593Smuzhiyun }
5697*4882a593Smuzhiyun
/**
 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 * @adap: the adapter
 * @st: holds the counter values
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Returns the values of TP's RDMA counters.  Reads two consecutive MIB
 * registers starting at TP_MIB_RQE_DFR_PKT_A into @st beginning at
 * rqe_dfr_pkt (presumably filling the adjacent field as well — relies
 * on struct tp_rdma_stats field adjacency; confirm against its
 * definition).
 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
			  bool sleep_ok)
{
	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, TP_MIB_RQE_DFR_PKT_A,
		       sleep_ok);
}
5712*4882a593Smuzhiyun
5713*4882a593Smuzhiyun /**
5714*4882a593Smuzhiyun * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5715*4882a593Smuzhiyun * @adap: the adapter
5716*4882a593Smuzhiyun * @idx: the port index
5717*4882a593Smuzhiyun * @st: holds the counter values
5718*4882a593Smuzhiyun * @sleep_ok: if true we may sleep while awaiting command completion
5719*4882a593Smuzhiyun *
5720*4882a593Smuzhiyun * Returns the values of TP's FCoE counters for the selected port.
5721*4882a593Smuzhiyun */
t4_get_fcoe_stats(struct adapter * adap,unsigned int idx,struct tp_fcoe_stats * st,bool sleep_ok)5722*4882a593Smuzhiyun void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5723*4882a593Smuzhiyun struct tp_fcoe_stats *st, bool sleep_ok)
5724*4882a593Smuzhiyun {
5725*4882a593Smuzhiyun u32 val[2];
5726*4882a593Smuzhiyun
5727*4882a593Smuzhiyun t4_tp_mib_read(adap, &st->frames_ddp, 1, TP_MIB_FCOE_DDP_0_A + idx,
5728*4882a593Smuzhiyun sleep_ok);
5729*4882a593Smuzhiyun
5730*4882a593Smuzhiyun t4_tp_mib_read(adap, &st->frames_drop, 1,
5731*4882a593Smuzhiyun TP_MIB_FCOE_DROP_0_A + idx, sleep_ok);
5732*4882a593Smuzhiyun
5733*4882a593Smuzhiyun t4_tp_mib_read(adap, val, 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx,
5734*4882a593Smuzhiyun sleep_ok);
5735*4882a593Smuzhiyun
5736*4882a593Smuzhiyun st->octets_ddp = ((u64)val[0] << 32) | val[1];
5737*4882a593Smuzhiyun }
5738*4882a593Smuzhiyun
5739*4882a593Smuzhiyun /**
5740*4882a593Smuzhiyun * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5741*4882a593Smuzhiyun * @adap: the adapter
5742*4882a593Smuzhiyun * @st: holds the counter values
5743*4882a593Smuzhiyun * @sleep_ok: if true we may sleep while awaiting command completion
5744*4882a593Smuzhiyun *
5745*4882a593Smuzhiyun * Returns the values of TP's counters for non-TCP directly-placed packets.
5746*4882a593Smuzhiyun */
t4_get_usm_stats(struct adapter * adap,struct tp_usm_stats * st,bool sleep_ok)5747*4882a593Smuzhiyun void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
5748*4882a593Smuzhiyun bool sleep_ok)
5749*4882a593Smuzhiyun {
5750*4882a593Smuzhiyun u32 val[4];
5751*4882a593Smuzhiyun
5752*4882a593Smuzhiyun t4_tp_mib_read(adap, val, 4, TP_MIB_USM_PKTS_A, sleep_ok);
5753*4882a593Smuzhiyun st->frames = val[0];
5754*4882a593Smuzhiyun st->drops = val[1];
5755*4882a593Smuzhiyun st->octets = ((u64)val[2] << 32) | val[3];
5756*4882a593Smuzhiyun }
5757*4882a593Smuzhiyun
5758*4882a593Smuzhiyun /**
5759*4882a593Smuzhiyun * t4_read_mtu_tbl - returns the values in the HW path MTU table
5760*4882a593Smuzhiyun * @adap: the adapter
5761*4882a593Smuzhiyun * @mtus: where to store the MTU values
5762*4882a593Smuzhiyun * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5763*4882a593Smuzhiyun *
5764*4882a593Smuzhiyun * Reads the HW path MTU table.
5765*4882a593Smuzhiyun */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	int idx;

	for (idx = 0; idx < NMTUS; idx++) {
		u32 entry;

		/* Select entry 'idx'; index 0xff requests a read-back. */
		t4_write_reg(adap, TP_MTU_TABLE_A,
			     MTUINDEX_V(0xff) | MTUVALUE_V(idx));
		entry = t4_read_reg(adap, TP_MTU_TABLE_A);

		mtus[idx] = MTUVALUE_G(entry);
		if (mtu_log)
			mtu_log[idx] = MTUWIDTH_G(entry);
	}
}
5780*4882a593Smuzhiyun
5781*4882a593Smuzhiyun /**
5782*4882a593Smuzhiyun * t4_read_cong_tbl - reads the congestion control table
5783*4882a593Smuzhiyun * @adap: the adapter
5784*4882a593Smuzhiyun * @incr: where to store the alpha values
5785*4882a593Smuzhiyun *
5786*4882a593Smuzhiyun * Reads the additive increments programmed into the HW congestion
5787*4882a593Smuzhiyun * control table.
5788*4882a593Smuzhiyun */
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
{
	unsigned int m, win;

	for (m = 0; m < NMTUS; m++) {
		for (win = 0; win < NCCTRL_WIN; win++) {
			u32 entry;

			/* Select the (MTU row, window) entry, then read
			 * back the 13-bit additive increment.
			 */
			t4_write_reg(adap, TP_CCTRL_TABLE_A,
				     ROWINDEX_V(0xffff) | (m << 5) | win);
			entry = t4_read_reg(adap, TP_CCTRL_TABLE_A);
			incr[m][win] = (u16)entry & 0x1fff;
		}
	}
}
5801*4882a593Smuzhiyun
5802*4882a593Smuzhiyun /**
5803*4882a593Smuzhiyun * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5804*4882a593Smuzhiyun * @adap: the adapter
5805*4882a593Smuzhiyun * @addr: the indirect TP register address
5806*4882a593Smuzhiyun * @mask: specifies the field within the register to modify
5807*4882a593Smuzhiyun * @val: new value for the field
5808*4882a593Smuzhiyun *
5809*4882a593Smuzhiyun * Sets a field of an indirect TP register to the given value.
5810*4882a593Smuzhiyun */
t4_tp_wr_bits_indirect(struct adapter * adap,unsigned int addr,unsigned int mask,unsigned int val)5811*4882a593Smuzhiyun void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5812*4882a593Smuzhiyun unsigned int mask, unsigned int val)
5813*4882a593Smuzhiyun {
5814*4882a593Smuzhiyun t4_write_reg(adap, TP_PIO_ADDR_A, addr);
5815*4882a593Smuzhiyun val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
5816*4882a593Smuzhiyun t4_write_reg(adap, TP_PIO_DATA_A, val);
5817*4882a593Smuzhiyun }
5818*4882a593Smuzhiyun
5819*4882a593Smuzhiyun /**
5820*4882a593Smuzhiyun * init_cong_ctrl - initialize congestion control parameters
5821*4882a593Smuzhiyun * @a: the alpha values for congestion control
5822*4882a593Smuzhiyun * @b: the beta values for congestion control
5823*4882a593Smuzhiyun *
5824*4882a593Smuzhiyun * Initialize the congestion control parameters.
5825*4882a593Smuzhiyun */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	/* Alpha (additive increment) values, one per congestion window. */
	static const unsigned short alpha_tbl[32] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	/* Beta (multiplicative decrease shift) values. */
	static const unsigned short beta_tbl[32] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
		3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7
	};
	int i;

	for (i = 0; i < 32; i++) {
		a[i] = alpha_tbl[i];
		b[i] = beta_tbl[i];
	}
}
5862*4882a593Smuzhiyun
5863*4882a593Smuzhiyun /* The minimum additive increment value for the congestion control table */
5864*4882a593Smuzhiyun #define CC_MIN_INCR 2U
5865*4882a593Smuzhiyun
5866*4882a593Smuzhiyun /**
5867*4882a593Smuzhiyun * t4_load_mtus - write the MTU and congestion control HW tables
5868*4882a593Smuzhiyun * @adap: the adapter
5869*4882a593Smuzhiyun * @mtus: the values for the MTU table
5870*4882a593Smuzhiyun * @alpha: the values for the congestion control alpha parameter
5871*4882a593Smuzhiyun * @beta: the values for the congestion control beta parameter
5872*4882a593Smuzhiyun *
5873*4882a593Smuzhiyun * Write the HW MTU table with the supplied MTUs and the high-speed
5874*4882a593Smuzhiyun * congestion control table with the supplied alpha, beta, and MTUs.
5875*4882a593Smuzhiyun * We write the two tables together because the additive increments
5876*4882a593Smuzhiyun * depend on the MTUs.
5877*4882a593Smuzhiyun */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Expected average packet counts per congestion window, used to
	 * scale the per-window additive increments below.
	 */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		/* fls() gives 1-based position of the highest set bit, i.e.
		 * ceil(log2(mtu + 1)); the test below rounds to the nearest
		 * power of two by checking the bit two below the MSB.
		 */
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		/* Program MTU table entry i: value plus its base-2 log. */
		t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
			     MTUWIDTH_V(log2) | MTUVALUE_V(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* Increment scales with payload (MTU minus 40 bytes
			 * of TCP/IP headers), clamped to CC_MIN_INCR.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			/* Congestion table entry layout: MTU index in
			 * bits 21+, window in 16+, beta in 13+, increment
			 * in the low 13 bits.
			 */
			t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
5909*4882a593Smuzhiyun
5910*4882a593Smuzhiyun /* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5911*4882a593Smuzhiyun * clocks. The formula is
5912*4882a593Smuzhiyun *
5913*4882a593Smuzhiyun * bytes/s = bytes256 * 256 * ClkFreq / 4096
5914*4882a593Smuzhiyun *
5915*4882a593Smuzhiyun * which is equivalent to
5916*4882a593Smuzhiyun *
5917*4882a593Smuzhiyun * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5918*4882a593Smuzhiyun */
chan_rate(struct adapter * adap,unsigned int bytes256)5919*4882a593Smuzhiyun static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5920*4882a593Smuzhiyun {
5921*4882a593Smuzhiyun u64 v = bytes256 * adap->params.vpd.cclk;
5922*4882a593Smuzhiyun
5923*4882a593Smuzhiyun return v * 62 + v / 2;
5924*4882a593Smuzhiyun }
5925*4882a593Smuzhiyun
5926*4882a593Smuzhiyun /**
5927*4882a593Smuzhiyun * t4_get_chan_txrate - get the current per channel Tx rates
5928*4882a593Smuzhiyun * @adap: the adapter
5929*4882a593Smuzhiyun * @nic_rate: rates for NIC traffic
5930*4882a593Smuzhiyun * @ofld_rate: rates for offloaded traffic
5931*4882a593Smuzhiyun *
5932*4882a593Smuzhiyun * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5933*4882a593Smuzhiyun * for each channel.
5934*4882a593Smuzhiyun */
void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
{
	u32 rate_reg;

	/* NIC (tunnel) traffic: channels 0/1 always, 2/3 only when the
	 * chip architecture exposes the full NCHAN channels.
	 */
	rate_reg = t4_read_reg(adap, TP_TX_TRATE_A);
	nic_rate[0] = chan_rate(adap, TNLRATE0_G(rate_reg));
	nic_rate[1] = chan_rate(adap, TNLRATE1_G(rate_reg));
	if (adap->params.arch.nchan == NCHAN) {
		nic_rate[2] = chan_rate(adap, TNLRATE2_G(rate_reg));
		nic_rate[3] = chan_rate(adap, TNLRATE3_G(rate_reg));
	}

	/* Same layout for the offloaded-traffic rate register. */
	rate_reg = t4_read_reg(adap, TP_TX_ORATE_A);
	ofld_rate[0] = chan_rate(adap, OFDRATE0_G(rate_reg));
	ofld_rate[1] = chan_rate(adap, OFDRATE1_G(rate_reg));
	if (adap->params.arch.nchan == NCHAN) {
		ofld_rate[2] = chan_rate(adap, OFDRATE2_G(rate_reg));
		ofld_rate[3] = chan_rate(adap, OFDRATE3_G(rate_reg));
	}
}
5955*4882a593Smuzhiyun
5956*4882a593Smuzhiyun /**
5957*4882a593Smuzhiyun * t4_set_trace_filter - configure one of the tracing filters
5958*4882a593Smuzhiyun * @adap: the adapter
5959*4882a593Smuzhiyun * @tp: the desired trace filter parameters
5960*4882a593Smuzhiyun * @idx: which filter to configure
5961*4882a593Smuzhiyun * @enable: whether to enable or disable the filter
5962*4882a593Smuzhiyun *
5963*4882a593Smuzhiyun * Configures one of the tracing filters available in HW. If @enable is
5964*4882a593Smuzhiyun * %0 @tp is not examined and may be %NULL. The user is responsible to
5965*4882a593Smuzhiyun * set the single/multiple trace mode by writing to MPS_TRC_CFG_A register
5966*4882a593Smuzhiyun */
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
			int idx, int enable)
{
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg, cfg;

	if (!enable) {
		/* Disabling: clear the filter's control register and done. */
		t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
		return 0;
	}

	/* Snap-length limits depend on whether multiple tracers share
	 * the capture FIFO.
	 */
	cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
	if (cfg & TRCMULTIFILTER_F) {
		/* If multiple tracers are enabled, then maximum
		 * capture size is 2.5KB (FIFO size of a single channel)
		 * minus 2 flits for CPL_TRACE_PKT header.
		 */
		if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
			return -EINVAL;
	} else {
		/* If multiple tracers are disabled, to avoid deadlocks
		 * maximum packet capture size of 9600 bytes is recommended.
		 * Also in this mode, only trace0 can be enabled and running.
		 */
		if (tp->snap_len > 9600 || idx)
			return -EINVAL;
	}

	/* Validate the remaining parameters against their field widths;
	 * the port limit differs between T4 (11) and later chips (19).
	 */
	if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
	    tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
	    tp->min_len > TFMINPKTSIZE_M)
		return -EINVAL;

	/* stop the tracer we'll be changing */
	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);

	/* Step to this filter's match / don't-care register banks. */
	idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
	data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
	mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;

	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		t4_write_reg(adap, data_reg, tp->data[i]);
		/* HW stores "don't care" bits, so invert the care mask. */
		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
	}
	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
		     TFCAPTUREMAX_V(tp->snap_len) |
		     TFMINPKTSIZE_V(tp->min_len));
	/* Write control register A last: it carries the enable bit, whose
	 * field layout differs between T4 and T5+.
	 */
	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
		     TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
		     (is_t4(adap->params.chip) ?
		     TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
		     T5_TFPORT_V(tp->port) | T5_TFEN_F |
		     T5_TFINVERTMATCH_V(tp->invert)));

	return 0;
}
6023*4882a593Smuzhiyun
6024*4882a593Smuzhiyun /**
6025*4882a593Smuzhiyun * t4_get_trace_filter - query one of the tracing filters
6026*4882a593Smuzhiyun * @adap: the adapter
6027*4882a593Smuzhiyun * @tp: the current trace filter parameters
6028*4882a593Smuzhiyun * @idx: which trace filter to query
6029*4882a593Smuzhiyun * @enabled: non-zero if the filter is enabled
6030*4882a593Smuzhiyun *
6031*4882a593Smuzhiyun * Returns the current settings of one of the HW tracing filters.
6032*4882a593Smuzhiyun */
void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
			 int *enabled)
{
	u32 ctla, ctlb;
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg;

	/* Match-control registers A/B hold this filter's configuration. */
	ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
	ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);

	/* Enable/port/invert field layout differs between T4 and T5+. */
	if (is_t4(adap->params.chip)) {
		*enabled = !!(ctla & TFEN_F);
		tp->port = TFPORT_G(ctla);
		tp->invert = !!(ctla & TFINVERTMATCH_F);
	} else {
		*enabled = !!(ctla & T5_TFEN_F);
		tp->port = T5_TFPORT_G(ctla);
		tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
	}
	tp->snap_len = TFCAPTUREMAX_G(ctlb);
	tp->min_len = TFMINPKTSIZE_G(ctlb);
	tp->skip_ofst = TFOFFSET_G(ctla);
	tp->skip_len = TFLENGTH_G(ctla);

	/* Step to this filter's match / don't-care register banks. */
	ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
	data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
	mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;

	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		/* HW stores "don't care" bits; invert to get the care mask,
		 * and report only the data bits the mask actually matches.
		 */
		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
	}
}
6066*4882a593Smuzhiyun
6067*4882a593Smuzhiyun /**
6068*4882a593Smuzhiyun * t4_pmtx_get_stats - returns the HW stats from PMTX
6069*4882a593Smuzhiyun * @adap: the adapter
6070*4882a593Smuzhiyun * @cnt: where to store the count statistics
6071*4882a593Smuzhiyun * @cycles: where to store the cycle statistics
6072*4882a593Smuzhiyun *
6073*4882a593Smuzhiyun * Returns performance statistics from PMTX.
6074*4882a593Smuzhiyun */
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	u32 stat[2];
	int i;

	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
		/* Select statistic i (the HW numbers them from 1). */
		t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
		cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
		if (is_t4(adap->params.chip)) {
			cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
			continue;
		}
		/* On T5+ the cycle counts sit behind the PM_TX debug
		 * interface, returned MSB-first in two 32-bit words.
		 */
		t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
				 PM_TX_DBG_DATA_A, stat, 2,
				 PM_TX_DBG_STAT_MSB_A);
		cycles[i] = ((u64)stat[0] << 32) | stat[1];
	}
}
6093*4882a593Smuzhiyun
6094*4882a593Smuzhiyun /**
6095*4882a593Smuzhiyun * t4_pmrx_get_stats - returns the HW stats from PMRX
6096*4882a593Smuzhiyun * @adap: the adapter
6097*4882a593Smuzhiyun * @cnt: where to store the count statistics
6098*4882a593Smuzhiyun * @cycles: where to store the cycle statistics
6099*4882a593Smuzhiyun *
6100*4882a593Smuzhiyun * Returns performance statistics from PMRX.
6101*4882a593Smuzhiyun */
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
{
	u32 stat[2];
	int i;

	for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
		/* Select statistic i (the HW numbers them from 1). */
		t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
		cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
		if (is_t4(adap->params.chip)) {
			cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
			continue;
		}
		/* On T5+ the cycle counts sit behind the PM_RX debug
		 * interface, returned MSB-first in two 32-bit words.
		 */
		t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
				 PM_RX_DBG_DATA_A, stat, 2,
				 PM_RX_DBG_STAT_MSB_A);
		cycles[i] = ((u64)stat[0] << 32) | stat[1];
	}
}
6120*4882a593Smuzhiyun
6121*4882a593Smuzhiyun /**
6122*4882a593Smuzhiyun * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
6123*4882a593Smuzhiyun * @adapter: the adapter
6124*4882a593Smuzhiyun * @pidx: the port index
6125*4882a593Smuzhiyun *
6126*4882a593Smuzhiyun * Computes and returns a bitmap indicating which MPS buffer groups are
6127*4882a593Smuzhiyun * associated with the given Port. Bit i is set if buffer group i is
6128*4882a593Smuzhiyun * used by the Port.
6129*4882a593Smuzhiyun */
compute_mps_bg_map(struct adapter * adapter,int pidx)6130*4882a593Smuzhiyun static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
6131*4882a593Smuzhiyun int pidx)
6132*4882a593Smuzhiyun {
6133*4882a593Smuzhiyun unsigned int chip_version, nports;
6134*4882a593Smuzhiyun
6135*4882a593Smuzhiyun chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6136*4882a593Smuzhiyun nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
6137*4882a593Smuzhiyun
6138*4882a593Smuzhiyun switch (chip_version) {
6139*4882a593Smuzhiyun case CHELSIO_T4:
6140*4882a593Smuzhiyun case CHELSIO_T5:
6141*4882a593Smuzhiyun switch (nports) {
6142*4882a593Smuzhiyun case 1: return 0xf;
6143*4882a593Smuzhiyun case 2: return 3 << (2 * pidx);
6144*4882a593Smuzhiyun case 4: return 1 << pidx;
6145*4882a593Smuzhiyun }
6146*4882a593Smuzhiyun break;
6147*4882a593Smuzhiyun
6148*4882a593Smuzhiyun case CHELSIO_T6:
6149*4882a593Smuzhiyun switch (nports) {
6150*4882a593Smuzhiyun case 2: return 1 << (2 * pidx);
6151*4882a593Smuzhiyun }
6152*4882a593Smuzhiyun break;
6153*4882a593Smuzhiyun }
6154*4882a593Smuzhiyun
6155*4882a593Smuzhiyun dev_err(adapter->pdev_dev, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
6156*4882a593Smuzhiyun chip_version, nports);
6157*4882a593Smuzhiyun
6158*4882a593Smuzhiyun return 0;
6159*4882a593Smuzhiyun }
6160*4882a593Smuzhiyun
6161*4882a593Smuzhiyun /**
6162*4882a593Smuzhiyun * t4_get_mps_bg_map - return the buffer groups associated with a port
6163*4882a593Smuzhiyun * @adapter: the adapter
6164*4882a593Smuzhiyun * @pidx: the port index
6165*4882a593Smuzhiyun *
6166*4882a593Smuzhiyun * Returns a bitmap indicating which MPS buffer groups are associated
6167*4882a593Smuzhiyun * with the given Port. Bit i is set if buffer group i is used by the
6168*4882a593Smuzhiyun * Port.
6169*4882a593Smuzhiyun */
unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
{
	u8 *mps_bg_map;
	unsigned int nports;

	nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
	if (pidx >= nports) {
		CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n",
			pidx, nports);
		return 0;
	}

	/* If we've already retrieved/computed this, just return the result.
	 * (A zero entry means "not cached yet"; a failed computation below
	 * also leaves zero, so error cases are re-attempted on each call.)
	 */
	mps_bg_map = adapter->params.mps_bg_map;
	if (mps_bg_map[pidx])
		return mps_bg_map[pidx];

	/* Newer Firmware can tell us what the MPS Buffer Group Map is.
	 * If we're talking to such Firmware, let it tell us. If the new
	 * API isn't supported, revert back to old hardcoded way. The value
	 * obtained from Firmware is encoded in below format:
	 *
	 * val = (( MPSBGMAP[Port 3] << 24 ) |
	 *        ( MPSBGMAP[Port 2] << 16 ) |
	 *        ( MPSBGMAP[Port 1] <<  8 ) |
	 *        ( MPSBGMAP[Port 0] <<  0 ))
	 */
	if (adapter->flags & CXGB4_FW_OK) {
		u32 param, val;
		int ret;

		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_MPSBGMAP));
		ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
					 0, 1, &param, &val);
		if (!ret) {
			int p;

			/* Store the BG Map for all of the Ports in order to
			 * avoid more calls to the Firmware in the future.
			 */
			for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
				mps_bg_map[p] = val & 0xff;

			return mps_bg_map[pidx];
		}
	}

	/* Either we're not talking to the Firmware or we're dealing with
	 * older Firmware which doesn't support the new API to get the MPS
	 * Buffer Group Map. Fall back to computing it ourselves.
	 */
	mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
	return mps_bg_map[pidx];
}
6226*4882a593Smuzhiyun
6227*4882a593Smuzhiyun /**
6228*4882a593Smuzhiyun * t4_get_tp_e2c_map - return the E2C channel map associated with a port
6229*4882a593Smuzhiyun * @adapter: the adapter
6230*4882a593Smuzhiyun * @pidx: the port index
6231*4882a593Smuzhiyun */
t4_get_tp_e2c_map(struct adapter * adapter,int pidx)6232*4882a593Smuzhiyun static unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
6233*4882a593Smuzhiyun {
6234*4882a593Smuzhiyun unsigned int nports;
6235*4882a593Smuzhiyun u32 param, val = 0;
6236*4882a593Smuzhiyun int ret;
6237*4882a593Smuzhiyun
6238*4882a593Smuzhiyun nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
6239*4882a593Smuzhiyun if (pidx >= nports) {
6240*4882a593Smuzhiyun CH_WARN(adapter, "TP E2C Channel Port Index %d >= Nports %d\n",
6241*4882a593Smuzhiyun pidx, nports);
6242*4882a593Smuzhiyun return 0;
6243*4882a593Smuzhiyun }
6244*4882a593Smuzhiyun
6245*4882a593Smuzhiyun /* FW version >= 1.16.44.0 can determine E2C channel map using
6246*4882a593Smuzhiyun * FW_PARAMS_PARAM_DEV_TPCHMAP API.
6247*4882a593Smuzhiyun */
6248*4882a593Smuzhiyun param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
6249*4882a593Smuzhiyun FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPCHMAP));
6250*4882a593Smuzhiyun ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6251*4882a593Smuzhiyun 0, 1, ¶m, &val);
6252*4882a593Smuzhiyun if (!ret)
6253*4882a593Smuzhiyun return (val >> (8 * pidx)) & 0xff;
6254*4882a593Smuzhiyun
6255*4882a593Smuzhiyun return 0;
6256*4882a593Smuzhiyun }
6257*4882a593Smuzhiyun
6258*4882a593Smuzhiyun /**
6259*4882a593Smuzhiyun * t4_get_tp_ch_map - return TP ingress channels associated with a port
6260*4882a593Smuzhiyun * @adap: the adapter
6261*4882a593Smuzhiyun * @pidx: the port index
6262*4882a593Smuzhiyun *
6263*4882a593Smuzhiyun * Returns a bitmap indicating which TP Ingress Channels are associated
6264*4882a593Smuzhiyun * with a given Port. Bit i is set if TP Ingress Channel i is used by
6265*4882a593Smuzhiyun * the Port.
6266*4882a593Smuzhiyun */
t4_get_tp_ch_map(struct adapter * adap,int pidx)6267*4882a593Smuzhiyun unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx)
6268*4882a593Smuzhiyun {
6269*4882a593Smuzhiyun unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
6270*4882a593Smuzhiyun unsigned int nports = 1 << NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
6271*4882a593Smuzhiyun
6272*4882a593Smuzhiyun if (pidx >= nports) {
6273*4882a593Smuzhiyun dev_warn(adap->pdev_dev, "TP Port Index %d >= Nports %d\n",
6274*4882a593Smuzhiyun pidx, nports);
6275*4882a593Smuzhiyun return 0;
6276*4882a593Smuzhiyun }
6277*4882a593Smuzhiyun
6278*4882a593Smuzhiyun switch (chip_version) {
6279*4882a593Smuzhiyun case CHELSIO_T4:
6280*4882a593Smuzhiyun case CHELSIO_T5:
6281*4882a593Smuzhiyun /* Note that this happens to be the same values as the MPS
6282*4882a593Smuzhiyun * Buffer Group Map for these Chips. But we replicate the code
6283*4882a593Smuzhiyun * here because they're really separate concepts.
6284*4882a593Smuzhiyun */
6285*4882a593Smuzhiyun switch (nports) {
6286*4882a593Smuzhiyun case 1: return 0xf;
6287*4882a593Smuzhiyun case 2: return 3 << (2 * pidx);
6288*4882a593Smuzhiyun case 4: return 1 << pidx;
6289*4882a593Smuzhiyun }
6290*4882a593Smuzhiyun break;
6291*4882a593Smuzhiyun
6292*4882a593Smuzhiyun case CHELSIO_T6:
6293*4882a593Smuzhiyun switch (nports) {
6294*4882a593Smuzhiyun case 1:
6295*4882a593Smuzhiyun case 2: return 1 << pidx;
6296*4882a593Smuzhiyun }
6297*4882a593Smuzhiyun break;
6298*4882a593Smuzhiyun }
6299*4882a593Smuzhiyun
6300*4882a593Smuzhiyun dev_err(adap->pdev_dev, "Need TP Channel Map for Chip %0x, Nports %d\n",
6301*4882a593Smuzhiyun chip_version, nports);
6302*4882a593Smuzhiyun return 0;
6303*4882a593Smuzhiyun }
6304*4882a593Smuzhiyun
6305*4882a593Smuzhiyun /**
6306*4882a593Smuzhiyun * t4_get_port_type_description - return Port Type string description
6307*4882a593Smuzhiyun * @port_type: firmware Port Type enumeration
6308*4882a593Smuzhiyun */
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
	/* Indexed directly by the firmware's fw_port_type enumeration. */
	static const char *const descriptions[] = {
		"Fiber_XFI",
		"Fiber_XAUI",
		"BT_SGMII",
		"BT_XFI",
		"BT_XAUI",
		"KX4",
		"CX4",
		"KX",
		"KR",
		"SFP",
		"BP_AP",
		"BP4_AP",
		"QSFP_10G",
		"QSA",
		"QSFP",
		"BP40_BA",
		"KR4_100G",
		"CR4_QSFP",
		"CR_QSFP",
		"CR2_QSFP",
		"SFP28",
		"KR_SFP28",
		"KR_XLAUI"
	};

	/* Anything the table doesn't cover maps to "UNKNOWN". */
	if (port_type >= ARRAY_SIZE(descriptions))
		return "UNKNOWN";
	return descriptions[port_type];
}
6341*4882a593Smuzhiyun
6342*4882a593Smuzhiyun /**
6343*4882a593Smuzhiyun * t4_get_port_stats_offset - collect port stats relative to a previous
6344*4882a593Smuzhiyun * snapshot
6345*4882a593Smuzhiyun * @adap: The adapter
6346*4882a593Smuzhiyun * @idx: The port
6347*4882a593Smuzhiyun * @stats: Current stats to fill
6348*4882a593Smuzhiyun * @offset: Previous stats snapshot
6349*4882a593Smuzhiyun */
t4_get_port_stats_offset(struct adapter * adap,int idx,struct port_stats * stats,struct port_stats * offset)6350*4882a593Smuzhiyun void t4_get_port_stats_offset(struct adapter *adap, int idx,
6351*4882a593Smuzhiyun struct port_stats *stats,
6352*4882a593Smuzhiyun struct port_stats *offset)
6353*4882a593Smuzhiyun {
6354*4882a593Smuzhiyun u64 *s, *o;
6355*4882a593Smuzhiyun int i;
6356*4882a593Smuzhiyun
6357*4882a593Smuzhiyun t4_get_port_stats(adap, idx, stats);
6358*4882a593Smuzhiyun for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
6359*4882a593Smuzhiyun i < (sizeof(struct port_stats) / sizeof(u64));
6360*4882a593Smuzhiyun i++, s++, o++)
6361*4882a593Smuzhiyun *s -= *o;
6362*4882a593Smuzhiyun }
6363*4882a593Smuzhiyun
6364*4882a593Smuzhiyun /**
6365*4882a593Smuzhiyun * t4_get_port_stats - collect port statistics
6366*4882a593Smuzhiyun * @adap: the adapter
6367*4882a593Smuzhiyun * @idx: the port index
6368*4882a593Smuzhiyun * @p: the stats structure to fill
6369*4882a593Smuzhiyun *
6370*4882a593Smuzhiyun * Collect statistics related to the given port from HW.
6371*4882a593Smuzhiyun */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);

	/* Port statistics registers live at different offsets on T4 versus
	 * T5 and later chips, so select the right register map per read.
	 */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	p->tx_octets = GET_STAT(TX_PORT_BYTES);
	p->tx_frames = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64 = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop = GET_STAT(TX_PORT_DROP);
	p->tx_pause = GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);

	/* On T5+ the hardware can be configured (via MPS_STAT_CTL) to count
	 * PAUSE frames inside the 64B-frame and multicast TX counters; back
	 * them out so the counters reflect data frames only.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & COUNTPAUSESTATTX_F)
			p->tx_frames_64 -= p->tx_pause;
		if (stat_ctl & COUNTPAUSEMCTX_F)
			p->tx_mcast_frames -= p->tx_pause;
	}
	p->rx_octets = GET_STAT(RX_PORT_BYTES);
	p->rx_frames = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64 = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);

	/* Same PAUSE-counting correction as above, on the RX side. */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & COUNTPAUSESTATRX_F)
			p->rx_frames_64 -= p->rx_pause;
		if (stat_ctl & COUNTPAUSEMCRX_F)
			p->rx_mcast_frames -= p->rx_pause;
	}

	/* Buffer-group drop/truncate counters: only read the groups that the
	 * MPS buffer-group map says belong to this port; report 0 otherwise.
	 */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6460*4882a593Smuzhiyun
/**
 *	t4_get_lb_stats - collect loopback port statistics
 *	@adap: the adapter
 *	@idx: the loopback port index
 *	@p: the stats structure to fill
 *
 *	Return HW statistics for the given loopback port.
 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);

	/* Loopback-port statistics registers are at different offsets on
	 * T4 versus T5 and later chips, so select the right map per read.
	 */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? \
	PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
	T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	p->octets = GET_STAT(BYTES);
	p->frames = GET_STAT(FRAMES);
	p->bcast_frames = GET_STAT(BCAST);
	p->mcast_frames = GET_STAT(MCAST);
	p->ucast_frames = GET_STAT(UCAST);
	p->error_frames = GET_STAT(ERROR);

	p->frames_64 = GET_STAT(64B);
	p->frames_65_127 = GET_STAT(65B_127B);
	p->frames_128_255 = GET_STAT(128B_255B);
	p->frames_256_511 = GET_STAT(256B_511B);
	p->frames_512_1023 = GET_STAT(512B_1023B);
	p->frames_1024_1518 = GET_STAT(1024B_1518B);
	p->frames_1519_max = GET_STAT(1519B_MAX);
	p->drop = GET_STAT(DROP_FRAMES);

	/* Buffer-group drop/truncate counters: only read the groups that
	 * the MPS buffer-group map assigns to this port; report 0 otherwise.
	 */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
6508*4882a593Smuzhiyun
6509*4882a593Smuzhiyun /* t4_mk_filtdelwr - create a delete filter WR
6510*4882a593Smuzhiyun * @ftid: the filter ID
6511*4882a593Smuzhiyun * @wr: the filter work request to populate
6512*4882a593Smuzhiyun * @qid: ingress queue to receive the delete notification
6513*4882a593Smuzhiyun *
6514*4882a593Smuzhiyun * Creates a filter work request to delete the supplied filter. If @qid is
6515*4882a593Smuzhiyun * negative the delete notification is suppressed.
6516*4882a593Smuzhiyun */
t4_mk_filtdelwr(unsigned int ftid,struct fw_filter_wr * wr,int qid)6517*4882a593Smuzhiyun void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
6518*4882a593Smuzhiyun {
6519*4882a593Smuzhiyun memset(wr, 0, sizeof(*wr));
6520*4882a593Smuzhiyun wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
6521*4882a593Smuzhiyun wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
6522*4882a593Smuzhiyun wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
6523*4882a593Smuzhiyun FW_FILTER_WR_NOREPLY_V(qid < 0));
6524*4882a593Smuzhiyun wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
6525*4882a593Smuzhiyun if (qid >= 0)
6526*4882a593Smuzhiyun wr->rx_chan_rx_rpl_iq =
6527*4882a593Smuzhiyun cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
6528*4882a593Smuzhiyun }
6529*4882a593Smuzhiyun
/* Initialize the common header of a firmware command structure: encode the
 * FW_<cmd>_CMD opcode, the REQUEST flag and the READ/WRITE direction flag
 * into op_to_write, and the command length (in 16-byte units) into
 * retval_len16.  @var is the command struct, @cmd the opcode stem and
 * @rd_wr either READ or WRITE.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
					FW_CMD_REQUEST_F | \
					FW_CMD_##rd_wr##_F); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
6536*4882a593Smuzhiyun
/**
 *	t4_fwaddrspace_write - write to the firmware's address space
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: address to write
 *	@val: value to write
 *
 *	Issues a FW LDST command to write @val at @addr in the firmware's
 *	address space.  Returns 0 on success or a negative errno.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	struct fw_ldst_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_addrspace =
		cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
			    FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F |
			    FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE));
	cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(cmd));
	cmd.u.addrval.addr = cpu_to_be32(addr);
	cmd.u.addrval.val = cpu_to_be32(val);

	return t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), NULL);
}
6555*4882a593Smuzhiyun
6556*4882a593Smuzhiyun /**
6557*4882a593Smuzhiyun * t4_mdio_rd - read a PHY register through MDIO
6558*4882a593Smuzhiyun * @adap: the adapter
6559*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
6560*4882a593Smuzhiyun * @phy_addr: the PHY address
6561*4882a593Smuzhiyun * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6562*4882a593Smuzhiyun * @reg: the register to read
6563*4882a593Smuzhiyun * @valp: where to store the value
6564*4882a593Smuzhiyun *
6565*4882a593Smuzhiyun * Issues a FW command through the given mailbox to read a PHY register.
6566*4882a593Smuzhiyun */
t4_mdio_rd(struct adapter * adap,unsigned int mbox,unsigned int phy_addr,unsigned int mmd,unsigned int reg,u16 * valp)6567*4882a593Smuzhiyun int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6568*4882a593Smuzhiyun unsigned int mmd, unsigned int reg, u16 *valp)
6569*4882a593Smuzhiyun {
6570*4882a593Smuzhiyun int ret;
6571*4882a593Smuzhiyun u32 ldst_addrspace;
6572*4882a593Smuzhiyun struct fw_ldst_cmd c;
6573*4882a593Smuzhiyun
6574*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
6575*4882a593Smuzhiyun ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6576*4882a593Smuzhiyun c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6577*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_READ_F |
6578*4882a593Smuzhiyun ldst_addrspace);
6579*4882a593Smuzhiyun c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6580*4882a593Smuzhiyun c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6581*4882a593Smuzhiyun FW_LDST_CMD_MMD_V(mmd));
6582*4882a593Smuzhiyun c.u.mdio.raddr = cpu_to_be16(reg);
6583*4882a593Smuzhiyun
6584*4882a593Smuzhiyun ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6585*4882a593Smuzhiyun if (ret == 0)
6586*4882a593Smuzhiyun *valp = be16_to_cpu(c.u.mdio.rval);
6587*4882a593Smuzhiyun return ret;
6588*4882a593Smuzhiyun }
6589*4882a593Smuzhiyun
6590*4882a593Smuzhiyun /**
6591*4882a593Smuzhiyun * t4_mdio_wr - write a PHY register through MDIO
6592*4882a593Smuzhiyun * @adap: the adapter
6593*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
6594*4882a593Smuzhiyun * @phy_addr: the PHY address
6595*4882a593Smuzhiyun * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6596*4882a593Smuzhiyun * @reg: the register to write
6597*4882a593Smuzhiyun * @val: value to write
6598*4882a593Smuzhiyun *
6599*4882a593Smuzhiyun * Issues a FW command through the given mailbox to write a PHY register.
6600*4882a593Smuzhiyun */
t4_mdio_wr(struct adapter * adap,unsigned int mbox,unsigned int phy_addr,unsigned int mmd,unsigned int reg,u16 val)6601*4882a593Smuzhiyun int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6602*4882a593Smuzhiyun unsigned int mmd, unsigned int reg, u16 val)
6603*4882a593Smuzhiyun {
6604*4882a593Smuzhiyun u32 ldst_addrspace;
6605*4882a593Smuzhiyun struct fw_ldst_cmd c;
6606*4882a593Smuzhiyun
6607*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
6608*4882a593Smuzhiyun ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6609*4882a593Smuzhiyun c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6610*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
6611*4882a593Smuzhiyun ldst_addrspace);
6612*4882a593Smuzhiyun c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6613*4882a593Smuzhiyun c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6614*4882a593Smuzhiyun FW_LDST_CMD_MMD_V(mmd));
6615*4882a593Smuzhiyun c.u.mdio.raddr = cpu_to_be16(reg);
6616*4882a593Smuzhiyun c.u.mdio.rval = cpu_to_be16(val);
6617*4882a593Smuzhiyun
6618*4882a593Smuzhiyun return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6619*4882a593Smuzhiyun }
6620*4882a593Smuzhiyun
6621*4882a593Smuzhiyun /**
6622*4882a593Smuzhiyun * t4_sge_decode_idma_state - decode the idma state
6623*4882a593Smuzhiyun * @adapter: the adapter
6624*4882a593Smuzhiyun * @state: the state idma is stuck in
6625*4882a593Smuzhiyun */
t4_sge_decode_idma_state(struct adapter * adapter,int state)6626*4882a593Smuzhiyun void t4_sge_decode_idma_state(struct adapter *adapter, int state)
6627*4882a593Smuzhiyun {
6628*4882a593Smuzhiyun static const char * const t4_decode[] = {
6629*4882a593Smuzhiyun "IDMA_IDLE",
6630*4882a593Smuzhiyun "IDMA_PUSH_MORE_CPL_FIFO",
6631*4882a593Smuzhiyun "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6632*4882a593Smuzhiyun "Not used",
6633*4882a593Smuzhiyun "IDMA_PHYSADDR_SEND_PCIEHDR",
6634*4882a593Smuzhiyun "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6635*4882a593Smuzhiyun "IDMA_PHYSADDR_SEND_PAYLOAD",
6636*4882a593Smuzhiyun "IDMA_SEND_FIFO_TO_IMSG",
6637*4882a593Smuzhiyun "IDMA_FL_REQ_DATA_FL_PREP",
6638*4882a593Smuzhiyun "IDMA_FL_REQ_DATA_FL",
6639*4882a593Smuzhiyun "IDMA_FL_DROP",
6640*4882a593Smuzhiyun "IDMA_FL_H_REQ_HEADER_FL",
6641*4882a593Smuzhiyun "IDMA_FL_H_SEND_PCIEHDR",
6642*4882a593Smuzhiyun "IDMA_FL_H_PUSH_CPL_FIFO",
6643*4882a593Smuzhiyun "IDMA_FL_H_SEND_CPL",
6644*4882a593Smuzhiyun "IDMA_FL_H_SEND_IP_HDR_FIRST",
6645*4882a593Smuzhiyun "IDMA_FL_H_SEND_IP_HDR",
6646*4882a593Smuzhiyun "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6647*4882a593Smuzhiyun "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6648*4882a593Smuzhiyun "IDMA_FL_H_SEND_IP_HDR_PADDING",
6649*4882a593Smuzhiyun "IDMA_FL_D_SEND_PCIEHDR",
6650*4882a593Smuzhiyun "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6651*4882a593Smuzhiyun "IDMA_FL_D_REQ_NEXT_DATA_FL",
6652*4882a593Smuzhiyun "IDMA_FL_SEND_PCIEHDR",
6653*4882a593Smuzhiyun "IDMA_FL_PUSH_CPL_FIFO",
6654*4882a593Smuzhiyun "IDMA_FL_SEND_CPL",
6655*4882a593Smuzhiyun "IDMA_FL_SEND_PAYLOAD_FIRST",
6656*4882a593Smuzhiyun "IDMA_FL_SEND_PAYLOAD",
6657*4882a593Smuzhiyun "IDMA_FL_REQ_NEXT_DATA_FL",
6658*4882a593Smuzhiyun "IDMA_FL_SEND_NEXT_PCIEHDR",
6659*4882a593Smuzhiyun "IDMA_FL_SEND_PADDING",
6660*4882a593Smuzhiyun "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6661*4882a593Smuzhiyun "IDMA_FL_SEND_FIFO_TO_IMSG",
6662*4882a593Smuzhiyun "IDMA_FL_REQ_DATAFL_DONE",
6663*4882a593Smuzhiyun "IDMA_FL_REQ_HEADERFL_DONE",
6664*4882a593Smuzhiyun };
6665*4882a593Smuzhiyun static const char * const t5_decode[] = {
6666*4882a593Smuzhiyun "IDMA_IDLE",
6667*4882a593Smuzhiyun "IDMA_ALMOST_IDLE",
6668*4882a593Smuzhiyun "IDMA_PUSH_MORE_CPL_FIFO",
6669*4882a593Smuzhiyun "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6670*4882a593Smuzhiyun "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6671*4882a593Smuzhiyun "IDMA_PHYSADDR_SEND_PCIEHDR",
6672*4882a593Smuzhiyun "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6673*4882a593Smuzhiyun "IDMA_PHYSADDR_SEND_PAYLOAD",
6674*4882a593Smuzhiyun "IDMA_SEND_FIFO_TO_IMSG",
6675*4882a593Smuzhiyun "IDMA_FL_REQ_DATA_FL",
6676*4882a593Smuzhiyun "IDMA_FL_DROP",
6677*4882a593Smuzhiyun "IDMA_FL_DROP_SEND_INC",
6678*4882a593Smuzhiyun "IDMA_FL_H_REQ_HEADER_FL",
6679*4882a593Smuzhiyun "IDMA_FL_H_SEND_PCIEHDR",
6680*4882a593Smuzhiyun "IDMA_FL_H_PUSH_CPL_FIFO",
6681*4882a593Smuzhiyun "IDMA_FL_H_SEND_CPL",
6682*4882a593Smuzhiyun "IDMA_FL_H_SEND_IP_HDR_FIRST",
6683*4882a593Smuzhiyun "IDMA_FL_H_SEND_IP_HDR",
6684*4882a593Smuzhiyun "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6685*4882a593Smuzhiyun "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6686*4882a593Smuzhiyun "IDMA_FL_H_SEND_IP_HDR_PADDING",
6687*4882a593Smuzhiyun "IDMA_FL_D_SEND_PCIEHDR",
6688*4882a593Smuzhiyun "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6689*4882a593Smuzhiyun "IDMA_FL_D_REQ_NEXT_DATA_FL",
6690*4882a593Smuzhiyun "IDMA_FL_SEND_PCIEHDR",
6691*4882a593Smuzhiyun "IDMA_FL_PUSH_CPL_FIFO",
6692*4882a593Smuzhiyun "IDMA_FL_SEND_CPL",
6693*4882a593Smuzhiyun "IDMA_FL_SEND_PAYLOAD_FIRST",
6694*4882a593Smuzhiyun "IDMA_FL_SEND_PAYLOAD",
6695*4882a593Smuzhiyun "IDMA_FL_REQ_NEXT_DATA_FL",
6696*4882a593Smuzhiyun "IDMA_FL_SEND_NEXT_PCIEHDR",
6697*4882a593Smuzhiyun "IDMA_FL_SEND_PADDING",
6698*4882a593Smuzhiyun "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6699*4882a593Smuzhiyun };
6700*4882a593Smuzhiyun static const char * const t6_decode[] = {
6701*4882a593Smuzhiyun "IDMA_IDLE",
6702*4882a593Smuzhiyun "IDMA_PUSH_MORE_CPL_FIFO",
6703*4882a593Smuzhiyun "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
6704*4882a593Smuzhiyun "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
6705*4882a593Smuzhiyun "IDMA_PHYSADDR_SEND_PCIEHDR",
6706*4882a593Smuzhiyun "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
6707*4882a593Smuzhiyun "IDMA_PHYSADDR_SEND_PAYLOAD",
6708*4882a593Smuzhiyun "IDMA_FL_REQ_DATA_FL",
6709*4882a593Smuzhiyun "IDMA_FL_DROP",
6710*4882a593Smuzhiyun "IDMA_FL_DROP_SEND_INC",
6711*4882a593Smuzhiyun "IDMA_FL_H_REQ_HEADER_FL",
6712*4882a593Smuzhiyun "IDMA_FL_H_SEND_PCIEHDR",
6713*4882a593Smuzhiyun "IDMA_FL_H_PUSH_CPL_FIFO",
6714*4882a593Smuzhiyun "IDMA_FL_H_SEND_CPL",
6715*4882a593Smuzhiyun "IDMA_FL_H_SEND_IP_HDR_FIRST",
6716*4882a593Smuzhiyun "IDMA_FL_H_SEND_IP_HDR",
6717*4882a593Smuzhiyun "IDMA_FL_H_REQ_NEXT_HEADER_FL",
6718*4882a593Smuzhiyun "IDMA_FL_H_SEND_NEXT_PCIEHDR",
6719*4882a593Smuzhiyun "IDMA_FL_H_SEND_IP_HDR_PADDING",
6720*4882a593Smuzhiyun "IDMA_FL_D_SEND_PCIEHDR",
6721*4882a593Smuzhiyun "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
6722*4882a593Smuzhiyun "IDMA_FL_D_REQ_NEXT_DATA_FL",
6723*4882a593Smuzhiyun "IDMA_FL_SEND_PCIEHDR",
6724*4882a593Smuzhiyun "IDMA_FL_PUSH_CPL_FIFO",
6725*4882a593Smuzhiyun "IDMA_FL_SEND_CPL",
6726*4882a593Smuzhiyun "IDMA_FL_SEND_PAYLOAD_FIRST",
6727*4882a593Smuzhiyun "IDMA_FL_SEND_PAYLOAD",
6728*4882a593Smuzhiyun "IDMA_FL_REQ_NEXT_DATA_FL",
6729*4882a593Smuzhiyun "IDMA_FL_SEND_NEXT_PCIEHDR",
6730*4882a593Smuzhiyun "IDMA_FL_SEND_PADDING",
6731*4882a593Smuzhiyun "IDMA_FL_SEND_COMPLETION_TO_IMSG",
6732*4882a593Smuzhiyun };
6733*4882a593Smuzhiyun static const u32 sge_regs[] = {
6734*4882a593Smuzhiyun SGE_DEBUG_DATA_LOW_INDEX_2_A,
6735*4882a593Smuzhiyun SGE_DEBUG_DATA_LOW_INDEX_3_A,
6736*4882a593Smuzhiyun SGE_DEBUG_DATA_HIGH_INDEX_10_A,
6737*4882a593Smuzhiyun };
6738*4882a593Smuzhiyun const char **sge_idma_decode;
6739*4882a593Smuzhiyun int sge_idma_decode_nstates;
6740*4882a593Smuzhiyun int i;
6741*4882a593Smuzhiyun unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6742*4882a593Smuzhiyun
6743*4882a593Smuzhiyun /* Select the right set of decode strings to dump depending on the
6744*4882a593Smuzhiyun * adapter chip type.
6745*4882a593Smuzhiyun */
6746*4882a593Smuzhiyun switch (chip_version) {
6747*4882a593Smuzhiyun case CHELSIO_T4:
6748*4882a593Smuzhiyun sge_idma_decode = (const char **)t4_decode;
6749*4882a593Smuzhiyun sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6750*4882a593Smuzhiyun break;
6751*4882a593Smuzhiyun
6752*4882a593Smuzhiyun case CHELSIO_T5:
6753*4882a593Smuzhiyun sge_idma_decode = (const char **)t5_decode;
6754*4882a593Smuzhiyun sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6755*4882a593Smuzhiyun break;
6756*4882a593Smuzhiyun
6757*4882a593Smuzhiyun case CHELSIO_T6:
6758*4882a593Smuzhiyun sge_idma_decode = (const char **)t6_decode;
6759*4882a593Smuzhiyun sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
6760*4882a593Smuzhiyun break;
6761*4882a593Smuzhiyun
6762*4882a593Smuzhiyun default:
6763*4882a593Smuzhiyun dev_err(adapter->pdev_dev,
6764*4882a593Smuzhiyun "Unsupported chip version %d\n", chip_version);
6765*4882a593Smuzhiyun return;
6766*4882a593Smuzhiyun }
6767*4882a593Smuzhiyun
6768*4882a593Smuzhiyun if (is_t4(adapter->params.chip)) {
6769*4882a593Smuzhiyun sge_idma_decode = (const char **)t4_decode;
6770*4882a593Smuzhiyun sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
6771*4882a593Smuzhiyun } else {
6772*4882a593Smuzhiyun sge_idma_decode = (const char **)t5_decode;
6773*4882a593Smuzhiyun sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
6774*4882a593Smuzhiyun }
6775*4882a593Smuzhiyun
6776*4882a593Smuzhiyun if (state < sge_idma_decode_nstates)
6777*4882a593Smuzhiyun CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
6778*4882a593Smuzhiyun else
6779*4882a593Smuzhiyun CH_WARN(adapter, "idma state %d unknown\n", state);
6780*4882a593Smuzhiyun
6781*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
6782*4882a593Smuzhiyun CH_WARN(adapter, "SGE register %#x value %#x\n",
6783*4882a593Smuzhiyun sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6784*4882a593Smuzhiyun }
6785*4882a593Smuzhiyun
6786*4882a593Smuzhiyun /**
6787*4882a593Smuzhiyun * t4_sge_ctxt_flush - flush the SGE context cache
6788*4882a593Smuzhiyun * @adap: the adapter
6789*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
6790*4882a593Smuzhiyun * @ctxt_type: Egress or Ingress
6791*4882a593Smuzhiyun *
6792*4882a593Smuzhiyun * Issues a FW command through the given mailbox to flush the
6793*4882a593Smuzhiyun * SGE context cache.
6794*4882a593Smuzhiyun */
t4_sge_ctxt_flush(struct adapter * adap,unsigned int mbox,int ctxt_type)6795*4882a593Smuzhiyun int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
6796*4882a593Smuzhiyun {
6797*4882a593Smuzhiyun int ret;
6798*4882a593Smuzhiyun u32 ldst_addrspace;
6799*4882a593Smuzhiyun struct fw_ldst_cmd c;
6800*4882a593Smuzhiyun
6801*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
6802*4882a593Smuzhiyun ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
6803*4882a593Smuzhiyun FW_LDST_ADDRSPC_SGE_EGRC :
6804*4882a593Smuzhiyun FW_LDST_ADDRSPC_SGE_INGC);
6805*4882a593Smuzhiyun c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6806*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_READ_F |
6807*4882a593Smuzhiyun ldst_addrspace);
6808*4882a593Smuzhiyun c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6809*4882a593Smuzhiyun c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
6810*4882a593Smuzhiyun
6811*4882a593Smuzhiyun ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6812*4882a593Smuzhiyun return ret;
6813*4882a593Smuzhiyun }
6814*4882a593Smuzhiyun
6815*4882a593Smuzhiyun /**
6816*4882a593Smuzhiyun * t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
6817*4882a593Smuzhiyun * @adap: the adapter
6818*4882a593Smuzhiyun * @ndbqtimers: size of the provided SGE Doorbell Queue Timer table
6819*4882a593Smuzhiyun * @dbqtimers: SGE Doorbell Queue Timer table
6820*4882a593Smuzhiyun *
6821*4882a593Smuzhiyun * Reads the SGE Doorbell Queue Timer values into the provided table.
6822*4882a593Smuzhiyun * Returns 0 on success (Firmware and Hardware support this feature),
6823*4882a593Smuzhiyun * an error on failure.
6824*4882a593Smuzhiyun */
t4_read_sge_dbqtimers(struct adapter * adap,unsigned int ndbqtimers,u16 * dbqtimers)6825*4882a593Smuzhiyun int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
6826*4882a593Smuzhiyun u16 *dbqtimers)
6827*4882a593Smuzhiyun {
6828*4882a593Smuzhiyun int ret, dbqtimerix;
6829*4882a593Smuzhiyun
6830*4882a593Smuzhiyun ret = 0;
6831*4882a593Smuzhiyun dbqtimerix = 0;
6832*4882a593Smuzhiyun while (dbqtimerix < ndbqtimers) {
6833*4882a593Smuzhiyun int nparams, param;
6834*4882a593Smuzhiyun u32 params[7], vals[7];
6835*4882a593Smuzhiyun
6836*4882a593Smuzhiyun nparams = ndbqtimers - dbqtimerix;
6837*4882a593Smuzhiyun if (nparams > ARRAY_SIZE(params))
6838*4882a593Smuzhiyun nparams = ARRAY_SIZE(params);
6839*4882a593Smuzhiyun
6840*4882a593Smuzhiyun for (param = 0; param < nparams; param++)
6841*4882a593Smuzhiyun params[param] =
6842*4882a593Smuzhiyun (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
6843*4882a593Smuzhiyun FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMER) |
6844*4882a593Smuzhiyun FW_PARAMS_PARAM_Y_V(dbqtimerix + param));
6845*4882a593Smuzhiyun ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
6846*4882a593Smuzhiyun nparams, params, vals);
6847*4882a593Smuzhiyun if (ret)
6848*4882a593Smuzhiyun break;
6849*4882a593Smuzhiyun
6850*4882a593Smuzhiyun for (param = 0; param < nparams; param++)
6851*4882a593Smuzhiyun dbqtimers[dbqtimerix++] = vals[param];
6852*4882a593Smuzhiyun }
6853*4882a593Smuzhiyun return ret;
6854*4882a593Smuzhiyun }
6855*4882a593Smuzhiyun
/**
 *	t4_fw_hello - establish communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@evt_mbox: mailbox to receive async FW events
 *	@master: specifies the caller's willingness to be the device master
 *	@state: returns the current device state (if non-NULL)
 *
 *	Issues a command to establish communication with FW.  Returns either
 *	an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	/* Encode our mastership preference: MASTERDIS refuses mastership,
	 * MASTERFORCE demands it (naming our own mailbox), otherwise let
	 * the firmware elect a master (MBMASTER_M = "don't care").
	 */
	c.err_to_clearinit = cpu_to_be32(
		FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
		FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
		FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
					mbox : FW_HELLO_CMD_MBMASTER_M) |
		FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
		FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
		FW_HELLO_CMD_CLEARINIT_F);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so.
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret < 0) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
			t4_report_fw_error(adap);
		return ret;
	}

	/* Decode the reply: extract the elected Master PF's mailbox and,
	 * if the caller asked for it, the current device state.
	 */
	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
	if (state) {
		if (v & FW_HELLO_CMD_ERR_F)
			*state = DEV_STATE_ERR;
		else if (v & FW_HELLO_CMD_INIT_F)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * PCIE_FW_MASTER_M so the test below will work ...
	 */
	if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			/* Poll the PCIE_FW register every 50ms. */
			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR_F)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & PCIE_FW_INIT_F)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				master_mbox = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
	}

	return master_mbox;
}
6985*4882a593Smuzhiyun
6986*4882a593Smuzhiyun /**
6987*4882a593Smuzhiyun * t4_fw_bye - end communication with FW
6988*4882a593Smuzhiyun * @adap: the adapter
6989*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
6990*4882a593Smuzhiyun *
6991*4882a593Smuzhiyun * Issues a command to terminate communication with FW.
6992*4882a593Smuzhiyun */
t4_fw_bye(struct adapter * adap,unsigned int mbox)6993*4882a593Smuzhiyun int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6994*4882a593Smuzhiyun {
6995*4882a593Smuzhiyun struct fw_bye_cmd c;
6996*4882a593Smuzhiyun
6997*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
6998*4882a593Smuzhiyun INIT_CMD(c, BYE, WRITE);
6999*4882a593Smuzhiyun return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7000*4882a593Smuzhiyun }
7001*4882a593Smuzhiyun
/**
 *	t4_early_init - ask FW to initialize the device
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to FW to partially initialize the device.  This
 *	performs initialization that generally doesn't depend on user input.
 *
 *	(Body is identical to t4_fw_initialize() below; both send an
 *	FW_INITIALIZE command.)
 */
int t4_early_init(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
7018*4882a593Smuzhiyun
7019*4882a593Smuzhiyun /**
7020*4882a593Smuzhiyun * t4_fw_reset - issue a reset to FW
7021*4882a593Smuzhiyun * @adap: the adapter
7022*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
7023*4882a593Smuzhiyun * @reset: specifies the type of reset to perform
7024*4882a593Smuzhiyun *
7025*4882a593Smuzhiyun * Issues a reset command of the specified type to FW.
7026*4882a593Smuzhiyun */
t4_fw_reset(struct adapter * adap,unsigned int mbox,int reset)7027*4882a593Smuzhiyun int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
7028*4882a593Smuzhiyun {
7029*4882a593Smuzhiyun struct fw_reset_cmd c;
7030*4882a593Smuzhiyun
7031*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
7032*4882a593Smuzhiyun INIT_CMD(c, RESET, WRITE);
7033*4882a593Smuzhiyun c.val = cpu_to_be32(reset);
7034*4882a593Smuzhiyun return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7035*4882a593Smuzhiyun }
7036*4882a593Smuzhiyun
/**
 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state.  The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	PCIE_FW_MASTER_M).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing.  The only way out of this state is to RESTART the firmware
 *	...
 *
 *	Return: the result of the FW RESET mailbox command (0 if the command
 *	was skipped because @mbox was out of range).  Note that when @force
 *	is set the uP is put into RESET even if the command failed, so a
 *	non-zero return does not necessarily mean nothing happened.
 */
static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_M) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
		c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		/* Assert uP RESET first, then latch the HALT flag in PCIE_FW */
		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
				 PCIE_FW_HALT_F);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}
7096*4882a593Smuzhiyun
/**
 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@reset: if we want to do a RESET to restart things
 *
 *	Restart firmware previously halted by t4_fw_halt().  On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 *
 *	We do this in two ways:
 *
 *	 1. If we're dealing with newer firmware we'll simply want to take
 *	    the chip's microprocessor out of RESET.  This will cause the
 *	    firmware to start up from its start vector.  And then we'll loop
 *	    until the firmware indicates it's started again (PCIE_FW.HALT
 *	    reset to 0) or we timeout.
 *
 *	 2. If we're dealing with older firmware then we'll need to RESET
 *	    the chip since older firmware won't recognize the PCIE_FW.HALT
 *	    flag and automatically RESET itself on startup.
 *
 *	Return: 0 on success, -ETIMEDOUT if PCIE_FW.HALT never clears in
 *	the non-@reset path.
 */
static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_M) {
			/* Take the uP out of RESET, then give it a moment
			 * (100ms) -- presumably to let firmware boot far
			 * enough to service the mailbox RESET command.
			 */
			t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
					PIORST_F | PIORSTMODE_F) == 0)
				return 0;
		}

		/* Hammer: chip-level PIO reset; the 2s delay presumably
		 * covers the worst-case reset/reboot time -- TODO confirm.
		 */
		t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
		msleep(2000);
	} else {
		int ms;

		/* Release the uP from RESET and poll (in 100ms steps, up to
		 * FW_CMD_MAX_TIMEOUT) for firmware to clear PCIE_FW.HALT,
		 * which signals it has restarted.
		 */
		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}
7160*4882a593Smuzhiyun
/**
 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image.  Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *	that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	/* Refuse images built for a different chip */
	if (!t4_fw_matches_chip(adap, fw_hdr))
		return -EINVAL;

	/* Disable CXGB4_FW_OK flag so that mbox commands with CXGB4_FW_OK flag
	 * set wont be sent when we are flashing FW.
	 */
	adap->flags &= ~CXGB4_FW_OK;

	/* Halt firmware first; abort unless the caller asked to force on */
	ret = t4_fw_halt(adap, mbox, force);
	if (ret < 0 && !force)
		goto out;

	ret = t4_load_fw(adap, fw_data, size);
	if (ret < 0)
		goto out;

	/*
	 * If there was a Firmware Configuration File stored in FLASH,
	 * there's a good chance that it won't be compatible with the new
	 * Firmware.  In order to prevent difficult to diagnose adapter
	 * initialization issues, we clear out the Firmware Configuration File
	 * portion of the FLASH .  The user will need to re-FLASH a new
	 * Firmware Configuration File which is compatible with the new
	 * Firmware if that's desired.
	 */
	(void)t4_load_cfg(adap, NULL, 0);

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	ret = t4_fw_restart(adap, mbox, reset);

	/* Grab potentially new Firmware Device Log parameters so we can see
	 * how healthy the new Firmware is.  It's okay to contact the new
	 * Firmware for these parameters even though, as far as it's
	 * concerned, we've never said "HELLO" to it ...
	 */
	(void)t4_init_devlog_params(adap);
out:
	/* NOTE(review): CXGB4_FW_OK is restored unconditionally, i.e. even on
	 * the error paths above.  Presumably intentional so normal mailbox
	 * traffic can resume after a failed flash attempt -- worth confirming.
	 */
	adap->flags |= CXGB4_FW_OK;
	return ret;
}
7236*4882a593Smuzhiyun
7237*4882a593Smuzhiyun /**
7238*4882a593Smuzhiyun * t4_fl_pkt_align - return the fl packet alignment
7239*4882a593Smuzhiyun * @adap: the adapter
7240*4882a593Smuzhiyun *
7241*4882a593Smuzhiyun * T4 has a single field to specify the packing and padding boundary.
7242*4882a593Smuzhiyun * T5 onwards has separate fields for this and hence the alignment for
7243*4882a593Smuzhiyun * next packet offset is maximum of these two.
7244*4882a593Smuzhiyun *
7245*4882a593Smuzhiyun */
t4_fl_pkt_align(struct adapter * adap)7246*4882a593Smuzhiyun int t4_fl_pkt_align(struct adapter *adap)
7247*4882a593Smuzhiyun {
7248*4882a593Smuzhiyun u32 sge_control, sge_control2;
7249*4882a593Smuzhiyun unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
7250*4882a593Smuzhiyun
7251*4882a593Smuzhiyun sge_control = t4_read_reg(adap, SGE_CONTROL_A);
7252*4882a593Smuzhiyun
7253*4882a593Smuzhiyun /* T4 uses a single control field to specify both the PCIe Padding and
7254*4882a593Smuzhiyun * Packing Boundary. T5 introduced the ability to specify these
7255*4882a593Smuzhiyun * separately. The actual Ingress Packet Data alignment boundary
7256*4882a593Smuzhiyun * within Packed Buffer Mode is the maximum of these two
7257*4882a593Smuzhiyun * specifications. (Note that it makes no real practical sense to
7258*4882a593Smuzhiyun * have the Padding Boundary be larger than the Packing Boundary but you
7259*4882a593Smuzhiyun * could set the chip up that way and, in fact, legacy T4 code would
7260*4882a593Smuzhiyun * end doing this because it would initialize the Padding Boundary and
7261*4882a593Smuzhiyun * leave the Packing Boundary initialized to 0 (16 bytes).)
7262*4882a593Smuzhiyun * Padding Boundary values in T6 starts from 8B,
7263*4882a593Smuzhiyun * where as it is 32B for T4 and T5.
7264*4882a593Smuzhiyun */
7265*4882a593Smuzhiyun if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
7266*4882a593Smuzhiyun ingpad_shift = INGPADBOUNDARY_SHIFT_X;
7267*4882a593Smuzhiyun else
7268*4882a593Smuzhiyun ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
7269*4882a593Smuzhiyun
7270*4882a593Smuzhiyun ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
7271*4882a593Smuzhiyun
7272*4882a593Smuzhiyun fl_align = ingpadboundary;
7273*4882a593Smuzhiyun if (!is_t4(adap->params.chip)) {
7274*4882a593Smuzhiyun /* T5 has a weird interpretation of one of the PCIe Packing
7275*4882a593Smuzhiyun * Boundary values. No idea why ...
7276*4882a593Smuzhiyun */
7277*4882a593Smuzhiyun sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
7278*4882a593Smuzhiyun ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
7279*4882a593Smuzhiyun if (ingpackboundary == INGPACKBOUNDARY_16B_X)
7280*4882a593Smuzhiyun ingpackboundary = 16;
7281*4882a593Smuzhiyun else
7282*4882a593Smuzhiyun ingpackboundary = 1 << (ingpackboundary +
7283*4882a593Smuzhiyun INGPACKBOUNDARY_SHIFT_X);
7284*4882a593Smuzhiyun
7285*4882a593Smuzhiyun fl_align = max(ingpadboundary, ingpackboundary);
7286*4882a593Smuzhiyun }
7287*4882a593Smuzhiyun return fl_align;
7288*4882a593Smuzhiyun }
7289*4882a593Smuzhiyun
/**
 *	t4_fixup_host_params - fix up host-dependent parameters
 *	@adap: the adapter
 *	@page_size: the host's Base Page Size
 *	@cache_line_size: the host's Cache Line Size
 *
 *	Various registers in T4 contain values which are dependent on the
 *	host's Base Page and Cache Line Sizes.  This function will fix all of
 *	those registers with the appropriate values as passed in ...
 *
 *	Return: always 0.
 */
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size)
{
	/* fls() - 1 == log2 only for powers of 2; assumes @page_size and the
	 * derived fl_align are powers of 2 -- TODO confirm for all callers.
	 */
	unsigned int page_shift = fls(page_size) - 1;
	/* Presumably the HOSTPAGESIZEPF fields encode log2(page size) - 10
	 * (i.e. relative to 1KB) -- verify against the register spec.
	 */
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	/* Free List alignment: at least 32 bytes, otherwise the cache line */
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;

	/* Program the same Host Page Size for all eight PFs */
	t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
		     HOSTPAGESIZEPF0_V(sge_hps) |
		     HOSTPAGESIZEPF1_V(sge_hps) |
		     HOSTPAGESIZEPF2_V(sge_hps) |
		     HOSTPAGESIZEPF3_V(sge_hps) |
		     HOSTPAGESIZEPF4_V(sge_hps) |
		     HOSTPAGESIZEPF5_V(sge_hps) |
		     HOSTPAGESIZEPF6_V(sge_hps) |
		     HOSTPAGESIZEPF7_V(sge_hps));

	if (is_t4(adap->params.chip)) {
		/* T4: single combined Padding/Packing Boundary field */
		t4_set_reg_field(adap, SGE_CONTROL_A,
				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
				 EGRSTATUSPAGESIZE_F,
				 INGPADBOUNDARY_V(fl_align_log -
						  INGPADBOUNDARY_SHIFT_X) |
				 EGRSTATUSPAGESIZE_V(stat_len != 64));
	} else {
		unsigned int pack_align;
		unsigned int ingpad, ingpack;

		/* T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.
		 */

		/* We want the Packing Boundary to be based on the Cache Line
		 * Size in order to help avoid False Sharing performance
		 * issues between CPUs, etc.  We also want the Packing
		 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
		 * get best performance when the Packing Boundary is a
		 * multiple of the Maximum Payload Size.
		 */
		pack_align = fl_align;
		if (pci_is_pcie(adap->pdev)) {
			unsigned int mps, mps_log;
			u16 devctl;

			/* The PCIe Device Control Maximum Payload Size field
			 * [bits 7:5] encodes sizes as powers of 2 starting at
			 * 128 bytes.
			 */
			pcie_capability_read_word(adap->pdev, PCI_EXP_DEVCTL,
						  &devctl);
			mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
			mps = 1 << mps_log;
			if (mps > pack_align)
				pack_align = mps;
		}

		/* N.B. T5/T6 have a crazy special interpretation of the "0"
		 * value for the Packing Boundary.  This corresponds to 16
		 * bytes instead of the expected 32 bytes.  So if we want 32
		 * bytes, the best we can really do is 64 bytes ...
		 */
		if (pack_align <= 16) {
			ingpack = INGPACKBOUNDARY_16B_X;
			fl_align = 16;
		} else if (pack_align == 32) {
			ingpack = INGPACKBOUNDARY_64B_X;
			fl_align = 64;
		} else {
			unsigned int pack_align_log = fls(pack_align) - 1;

			ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
			fl_align = pack_align;
		}

		/* Use the smallest Ingress Padding which isn't smaller than
		 * the Memory Controller Read/Write Size.  We'll take that as
		 * being 8 bytes since we don't know of any system with a
		 * wider Memory Controller Bus Width.
		 */
		if (is_t5(adap->params.chip))
			ingpad = INGPADBOUNDARY_32B_X;
		else
			ingpad = T6_INGPADBOUNDARY_8B_X;

		t4_set_reg_field(adap, SGE_CONTROL_A,
				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
				 EGRSTATUSPAGESIZE_F,
				 INGPADBOUNDARY_V(ingpad) |
				 EGRSTATUSPAGESIZE_V(stat_len != 64));
		t4_set_reg_field(adap, SGE_CONTROL2_A,
				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
				 INGPACKBOUNDARY_V(ingpack));
	}
	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 * are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
	/* Round the existing SIZE2/SIZE3 values up to fl_align (fl_align is a
	 * power of 2, so the mask arithmetic below is valid).
	 */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
		     & ~(fl_align-1));
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
		     & ~(fl_align-1));

	/* Presumably HPZ0 encodes the TDDP page size as log2 relative to 4KB
	 * (page_shift - 12) -- verify against the ULP_RX register spec.
	 */
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));

	return 0;
}
7439*4882a593Smuzhiyun
7440*4882a593Smuzhiyun /**
7441*4882a593Smuzhiyun * t4_fw_initialize - ask FW to initialize the device
7442*4882a593Smuzhiyun * @adap: the adapter
7443*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
7444*4882a593Smuzhiyun *
7445*4882a593Smuzhiyun * Issues a command to FW to partially initialize the device. This
7446*4882a593Smuzhiyun * performs initialization that generally doesn't depend on user input.
7447*4882a593Smuzhiyun */
t4_fw_initialize(struct adapter * adap,unsigned int mbox)7448*4882a593Smuzhiyun int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7449*4882a593Smuzhiyun {
7450*4882a593Smuzhiyun struct fw_initialize_cmd c;
7451*4882a593Smuzhiyun
7452*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
7453*4882a593Smuzhiyun INIT_CMD(c, INITIALIZE, WRITE);
7454*4882a593Smuzhiyun return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7455*4882a593Smuzhiyun }
7456*4882a593Smuzhiyun
7457*4882a593Smuzhiyun /**
7458*4882a593Smuzhiyun * t4_query_params_rw - query FW or device parameters
7459*4882a593Smuzhiyun * @adap: the adapter
7460*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
7461*4882a593Smuzhiyun * @pf: the PF
7462*4882a593Smuzhiyun * @vf: the VF
7463*4882a593Smuzhiyun * @nparams: the number of parameters
7464*4882a593Smuzhiyun * @params: the parameter names
7465*4882a593Smuzhiyun * @val: the parameter values
7466*4882a593Smuzhiyun * @rw: Write and read flag
7467*4882a593Smuzhiyun * @sleep_ok: if true, we may sleep awaiting mbox cmd completion
7468*4882a593Smuzhiyun *
7469*4882a593Smuzhiyun * Reads the value of FW or device parameters. Up to 7 parameters can be
7470*4882a593Smuzhiyun * queried at once.
7471*4882a593Smuzhiyun */
t4_query_params_rw(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int nparams,const u32 * params,u32 * val,int rw,bool sleep_ok)7472*4882a593Smuzhiyun int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7473*4882a593Smuzhiyun unsigned int vf, unsigned int nparams, const u32 *params,
7474*4882a593Smuzhiyun u32 *val, int rw, bool sleep_ok)
7475*4882a593Smuzhiyun {
7476*4882a593Smuzhiyun int i, ret;
7477*4882a593Smuzhiyun struct fw_params_cmd c;
7478*4882a593Smuzhiyun __be32 *p = &c.param[0].mnem;
7479*4882a593Smuzhiyun
7480*4882a593Smuzhiyun if (nparams > 7)
7481*4882a593Smuzhiyun return -EINVAL;
7482*4882a593Smuzhiyun
7483*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
7484*4882a593Smuzhiyun c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7485*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_READ_F |
7486*4882a593Smuzhiyun FW_PARAMS_CMD_PFN_V(pf) |
7487*4882a593Smuzhiyun FW_PARAMS_CMD_VFN_V(vf));
7488*4882a593Smuzhiyun c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7489*4882a593Smuzhiyun
7490*4882a593Smuzhiyun for (i = 0; i < nparams; i++) {
7491*4882a593Smuzhiyun *p++ = cpu_to_be32(*params++);
7492*4882a593Smuzhiyun if (rw)
7493*4882a593Smuzhiyun *p = cpu_to_be32(*(val + i));
7494*4882a593Smuzhiyun p++;
7495*4882a593Smuzhiyun }
7496*4882a593Smuzhiyun
7497*4882a593Smuzhiyun ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7498*4882a593Smuzhiyun if (ret == 0)
7499*4882a593Smuzhiyun for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
7500*4882a593Smuzhiyun *val++ = be32_to_cpu(*p);
7501*4882a593Smuzhiyun return ret;
7502*4882a593Smuzhiyun }
7503*4882a593Smuzhiyun
t4_query_params(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int nparams,const u32 * params,u32 * val)7504*4882a593Smuzhiyun int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7505*4882a593Smuzhiyun unsigned int vf, unsigned int nparams, const u32 *params,
7506*4882a593Smuzhiyun u32 *val)
7507*4882a593Smuzhiyun {
7508*4882a593Smuzhiyun return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7509*4882a593Smuzhiyun true);
7510*4882a593Smuzhiyun }
7511*4882a593Smuzhiyun
t4_query_params_ns(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int nparams,const u32 * params,u32 * val)7512*4882a593Smuzhiyun int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
7513*4882a593Smuzhiyun unsigned int vf, unsigned int nparams, const u32 *params,
7514*4882a593Smuzhiyun u32 *val)
7515*4882a593Smuzhiyun {
7516*4882a593Smuzhiyun return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7517*4882a593Smuzhiyun false);
7518*4882a593Smuzhiyun }
7519*4882a593Smuzhiyun
7520*4882a593Smuzhiyun /**
7521*4882a593Smuzhiyun * t4_set_params_timeout - sets FW or device parameters
7522*4882a593Smuzhiyun * @adap: the adapter
7523*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
7524*4882a593Smuzhiyun * @pf: the PF
7525*4882a593Smuzhiyun * @vf: the VF
7526*4882a593Smuzhiyun * @nparams: the number of parameters
7527*4882a593Smuzhiyun * @params: the parameter names
7528*4882a593Smuzhiyun * @val: the parameter values
7529*4882a593Smuzhiyun * @timeout: the timeout time
7530*4882a593Smuzhiyun *
7531*4882a593Smuzhiyun * Sets the value of FW or device parameters. Up to 7 parameters can be
7532*4882a593Smuzhiyun * specified at once.
7533*4882a593Smuzhiyun */
t4_set_params_timeout(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int nparams,const u32 * params,const u32 * val,int timeout)7534*4882a593Smuzhiyun int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7535*4882a593Smuzhiyun unsigned int pf, unsigned int vf,
7536*4882a593Smuzhiyun unsigned int nparams, const u32 *params,
7537*4882a593Smuzhiyun const u32 *val, int timeout)
7538*4882a593Smuzhiyun {
7539*4882a593Smuzhiyun struct fw_params_cmd c;
7540*4882a593Smuzhiyun __be32 *p = &c.param[0].mnem;
7541*4882a593Smuzhiyun
7542*4882a593Smuzhiyun if (nparams > 7)
7543*4882a593Smuzhiyun return -EINVAL;
7544*4882a593Smuzhiyun
7545*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
7546*4882a593Smuzhiyun c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7547*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7548*4882a593Smuzhiyun FW_PARAMS_CMD_PFN_V(pf) |
7549*4882a593Smuzhiyun FW_PARAMS_CMD_VFN_V(vf));
7550*4882a593Smuzhiyun c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7551*4882a593Smuzhiyun
7552*4882a593Smuzhiyun while (nparams--) {
7553*4882a593Smuzhiyun *p++ = cpu_to_be32(*params++);
7554*4882a593Smuzhiyun *p++ = cpu_to_be32(*val++);
7555*4882a593Smuzhiyun }
7556*4882a593Smuzhiyun
7557*4882a593Smuzhiyun return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7558*4882a593Smuzhiyun }
7559*4882a593Smuzhiyun
/**
 *	t4_set_params - sets FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *
 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
 *	specified at once.  Thin wrapper around t4_set_params_timeout() using
 *	the default FW_CMD_MAX_TIMEOUT mailbox timeout.
 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
				     FW_CMD_MAX_TIMEOUT);
}
7580*4882a593Smuzhiyun
7581*4882a593Smuzhiyun /**
7582*4882a593Smuzhiyun * t4_cfg_pfvf - configure PF/VF resource limits
7583*4882a593Smuzhiyun * @adap: the adapter
7584*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
7585*4882a593Smuzhiyun * @pf: the PF being configured
7586*4882a593Smuzhiyun * @vf: the VF being configured
7587*4882a593Smuzhiyun * @txq: the max number of egress queues
7588*4882a593Smuzhiyun * @txq_eth_ctrl: the max number of egress Ethernet or control queues
7589*4882a593Smuzhiyun * @rxqi: the max number of interrupt-capable ingress queues
7590*4882a593Smuzhiyun * @rxq: the max number of interruptless ingress queues
7591*4882a593Smuzhiyun * @tc: the PCI traffic class
7592*4882a593Smuzhiyun * @vi: the max number of virtual interfaces
7593*4882a593Smuzhiyun * @cmask: the channel access rights mask for the PF/VF
7594*4882a593Smuzhiyun * @pmask: the port access rights mask for the PF/VF
7595*4882a593Smuzhiyun * @nexact: the maximum number of exact MPS filters
7596*4882a593Smuzhiyun * @rcaps: read capabilities
7597*4882a593Smuzhiyun * @wxcaps: write/execute capabilities
7598*4882a593Smuzhiyun *
7599*4882a593Smuzhiyun * Configures resource limits and capabilities for a physical or virtual
7600*4882a593Smuzhiyun * function.
7601*4882a593Smuzhiyun */
t4_cfg_pfvf(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int txq,unsigned int txq_eth_ctrl,unsigned int rxqi,unsigned int rxq,unsigned int tc,unsigned int vi,unsigned int cmask,unsigned int pmask,unsigned int nexact,unsigned int rcaps,unsigned int wxcaps)7602*4882a593Smuzhiyun int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7603*4882a593Smuzhiyun unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7604*4882a593Smuzhiyun unsigned int rxqi, unsigned int rxq, unsigned int tc,
7605*4882a593Smuzhiyun unsigned int vi, unsigned int cmask, unsigned int pmask,
7606*4882a593Smuzhiyun unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7607*4882a593Smuzhiyun {
7608*4882a593Smuzhiyun struct fw_pfvf_cmd c;
7609*4882a593Smuzhiyun
7610*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
7611*4882a593Smuzhiyun c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
7612*4882a593Smuzhiyun FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
7613*4882a593Smuzhiyun FW_PFVF_CMD_VFN_V(vf));
7614*4882a593Smuzhiyun c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7615*4882a593Smuzhiyun c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
7616*4882a593Smuzhiyun FW_PFVF_CMD_NIQ_V(rxq));
7617*4882a593Smuzhiyun c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
7618*4882a593Smuzhiyun FW_PFVF_CMD_PMASK_V(pmask) |
7619*4882a593Smuzhiyun FW_PFVF_CMD_NEQ_V(txq));
7620*4882a593Smuzhiyun c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
7621*4882a593Smuzhiyun FW_PFVF_CMD_NVI_V(vi) |
7622*4882a593Smuzhiyun FW_PFVF_CMD_NEXACTF_V(nexact));
7623*4882a593Smuzhiyun c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
7624*4882a593Smuzhiyun FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
7625*4882a593Smuzhiyun FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
7626*4882a593Smuzhiyun return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7627*4882a593Smuzhiyun }
7628*4882a593Smuzhiyun
7629*4882a593Smuzhiyun /**
7630*4882a593Smuzhiyun * t4_alloc_vi - allocate a virtual interface
7631*4882a593Smuzhiyun * @adap: the adapter
7632*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
7633*4882a593Smuzhiyun * @port: physical port associated with the VI
7634*4882a593Smuzhiyun * @pf: the PF owning the VI
7635*4882a593Smuzhiyun * @vf: the VF owning the VI
7636*4882a593Smuzhiyun * @nmac: number of MAC addresses needed (1 to 5)
7637*4882a593Smuzhiyun * @mac: the MAC addresses of the VI
7638*4882a593Smuzhiyun * @rss_size: size of RSS table slice associated with this VI
7639*4882a593Smuzhiyun * @vivld: the destination to store the VI Valid value.
7640*4882a593Smuzhiyun * @vin: the destination to store the VIN value.
7641*4882a593Smuzhiyun *
7642*4882a593Smuzhiyun * Allocates a virtual interface for the given physical port. If @mac is
7643*4882a593Smuzhiyun * not %NULL it contains the MAC addresses of the VI as assigned by FW.
7644*4882a593Smuzhiyun * @mac should be large enough to hold @nmac Ethernet addresses, they are
7645*4882a593Smuzhiyun * stored consecutively so the space needed is @nmac * 6 bytes.
7646*4882a593Smuzhiyun * Returns a negative error number or the non-negative VI id.
7647*4882a593Smuzhiyun */
t4_alloc_vi(struct adapter * adap,unsigned int mbox,unsigned int port,unsigned int pf,unsigned int vf,unsigned int nmac,u8 * mac,unsigned int * rss_size,u8 * vivld,u8 * vin)7648*4882a593Smuzhiyun int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7649*4882a593Smuzhiyun unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7650*4882a593Smuzhiyun unsigned int *rss_size, u8 *vivld, u8 *vin)
7651*4882a593Smuzhiyun {
7652*4882a593Smuzhiyun int ret;
7653*4882a593Smuzhiyun struct fw_vi_cmd c;
7654*4882a593Smuzhiyun
7655*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
7656*4882a593Smuzhiyun c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
7657*4882a593Smuzhiyun FW_CMD_WRITE_F | FW_CMD_EXEC_F |
7658*4882a593Smuzhiyun FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
7659*4882a593Smuzhiyun c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
7660*4882a593Smuzhiyun c.portid_pkd = FW_VI_CMD_PORTID_V(port);
7661*4882a593Smuzhiyun c.nmac = nmac - 1;
7662*4882a593Smuzhiyun
7663*4882a593Smuzhiyun ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7664*4882a593Smuzhiyun if (ret)
7665*4882a593Smuzhiyun return ret;
7666*4882a593Smuzhiyun
7667*4882a593Smuzhiyun if (mac) {
7668*4882a593Smuzhiyun memcpy(mac, c.mac, sizeof(c.mac));
7669*4882a593Smuzhiyun switch (nmac) {
7670*4882a593Smuzhiyun case 5:
7671*4882a593Smuzhiyun memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7672*4882a593Smuzhiyun fallthrough;
7673*4882a593Smuzhiyun case 4:
7674*4882a593Smuzhiyun memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7675*4882a593Smuzhiyun fallthrough;
7676*4882a593Smuzhiyun case 3:
7677*4882a593Smuzhiyun memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7678*4882a593Smuzhiyun fallthrough;
7679*4882a593Smuzhiyun case 2:
7680*4882a593Smuzhiyun memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
7681*4882a593Smuzhiyun }
7682*4882a593Smuzhiyun }
7683*4882a593Smuzhiyun if (rss_size)
7684*4882a593Smuzhiyun *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
7685*4882a593Smuzhiyun
7686*4882a593Smuzhiyun if (vivld)
7687*4882a593Smuzhiyun *vivld = FW_VI_CMD_VFVLD_G(be32_to_cpu(c.alloc_to_len16));
7688*4882a593Smuzhiyun
7689*4882a593Smuzhiyun if (vin)
7690*4882a593Smuzhiyun *vin = FW_VI_CMD_VIN_G(be32_to_cpu(c.alloc_to_len16));
7691*4882a593Smuzhiyun
7692*4882a593Smuzhiyun return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
7693*4882a593Smuzhiyun }
7694*4882a593Smuzhiyun
7695*4882a593Smuzhiyun /**
7696*4882a593Smuzhiyun * t4_free_vi - free a virtual interface
7697*4882a593Smuzhiyun * @adap: the adapter
7698*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
7699*4882a593Smuzhiyun * @pf: the PF owning the VI
7700*4882a593Smuzhiyun * @vf: the VF owning the VI
7701*4882a593Smuzhiyun * @viid: virtual interface identifiler
7702*4882a593Smuzhiyun *
7703*4882a593Smuzhiyun * Free a previously allocated virtual interface.
7704*4882a593Smuzhiyun */
t4_free_vi(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int viid)7705*4882a593Smuzhiyun int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7706*4882a593Smuzhiyun unsigned int vf, unsigned int viid)
7707*4882a593Smuzhiyun {
7708*4882a593Smuzhiyun struct fw_vi_cmd c;
7709*4882a593Smuzhiyun
7710*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
7711*4882a593Smuzhiyun c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
7712*4882a593Smuzhiyun FW_CMD_REQUEST_F |
7713*4882a593Smuzhiyun FW_CMD_EXEC_F |
7714*4882a593Smuzhiyun FW_VI_CMD_PFN_V(pf) |
7715*4882a593Smuzhiyun FW_VI_CMD_VFN_V(vf));
7716*4882a593Smuzhiyun c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
7717*4882a593Smuzhiyun c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
7718*4882a593Smuzhiyun
7719*4882a593Smuzhiyun return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7720*4882a593Smuzhiyun }
7721*4882a593Smuzhiyun
7722*4882a593Smuzhiyun /**
7723*4882a593Smuzhiyun * t4_set_rxmode - set Rx properties of a virtual interface
7724*4882a593Smuzhiyun * @adap: the adapter
7725*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
7726*4882a593Smuzhiyun * @viid: the VI id
7727*4882a593Smuzhiyun * @viid_mirror: the mirror VI id
7728*4882a593Smuzhiyun * @mtu: the new MTU or -1
7729*4882a593Smuzhiyun * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7730*4882a593Smuzhiyun * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7731*4882a593Smuzhiyun * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7732*4882a593Smuzhiyun * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7733*4882a593Smuzhiyun * @sleep_ok: if true we may sleep while awaiting command completion
7734*4882a593Smuzhiyun *
7735*4882a593Smuzhiyun * Sets Rx properties of a virtual interface.
7736*4882a593Smuzhiyun */
t4_set_rxmode(struct adapter * adap,unsigned int mbox,unsigned int viid,unsigned int viid_mirror,int mtu,int promisc,int all_multi,int bcast,int vlanex,bool sleep_ok)7737*4882a593Smuzhiyun int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7738*4882a593Smuzhiyun unsigned int viid_mirror, int mtu, int promisc, int all_multi,
7739*4882a593Smuzhiyun int bcast, int vlanex, bool sleep_ok)
7740*4882a593Smuzhiyun {
7741*4882a593Smuzhiyun struct fw_vi_rxmode_cmd c, c_mirror;
7742*4882a593Smuzhiyun int ret;
7743*4882a593Smuzhiyun
7744*4882a593Smuzhiyun /* convert to FW values */
7745*4882a593Smuzhiyun if (mtu < 0)
7746*4882a593Smuzhiyun mtu = FW_RXMODE_MTU_NO_CHG;
7747*4882a593Smuzhiyun if (promisc < 0)
7748*4882a593Smuzhiyun promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
7749*4882a593Smuzhiyun if (all_multi < 0)
7750*4882a593Smuzhiyun all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
7751*4882a593Smuzhiyun if (bcast < 0)
7752*4882a593Smuzhiyun bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
7753*4882a593Smuzhiyun if (vlanex < 0)
7754*4882a593Smuzhiyun vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
7755*4882a593Smuzhiyun
7756*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
7757*4882a593Smuzhiyun c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
7758*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7759*4882a593Smuzhiyun FW_VI_RXMODE_CMD_VIID_V(viid));
7760*4882a593Smuzhiyun c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7761*4882a593Smuzhiyun c.mtu_to_vlanexen =
7762*4882a593Smuzhiyun cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
7763*4882a593Smuzhiyun FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
7764*4882a593Smuzhiyun FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
7765*4882a593Smuzhiyun FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
7766*4882a593Smuzhiyun FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
7767*4882a593Smuzhiyun
7768*4882a593Smuzhiyun if (viid_mirror) {
7769*4882a593Smuzhiyun memcpy(&c_mirror, &c, sizeof(c_mirror));
7770*4882a593Smuzhiyun c_mirror.op_to_viid =
7771*4882a593Smuzhiyun cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
7772*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7773*4882a593Smuzhiyun FW_VI_RXMODE_CMD_VIID_V(viid_mirror));
7774*4882a593Smuzhiyun }
7775*4882a593Smuzhiyun
7776*4882a593Smuzhiyun ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7777*4882a593Smuzhiyun if (ret)
7778*4882a593Smuzhiyun return ret;
7779*4882a593Smuzhiyun
7780*4882a593Smuzhiyun if (viid_mirror)
7781*4882a593Smuzhiyun ret = t4_wr_mbox_meat(adap, mbox, &c_mirror, sizeof(c_mirror),
7782*4882a593Smuzhiyun NULL, sleep_ok);
7783*4882a593Smuzhiyun
7784*4882a593Smuzhiyun return ret;
7785*4882a593Smuzhiyun }
7786*4882a593Smuzhiyun
7787*4882a593Smuzhiyun /**
7788*4882a593Smuzhiyun * t4_free_encap_mac_filt - frees MPS entry at given index
7789*4882a593Smuzhiyun * @adap: the adapter
7790*4882a593Smuzhiyun * @viid: the VI id
7791*4882a593Smuzhiyun * @idx: index of MPS entry to be freed
7792*4882a593Smuzhiyun * @sleep_ok: call is allowed to sleep
7793*4882a593Smuzhiyun *
7794*4882a593Smuzhiyun * Frees the MPS entry at supplied index
7795*4882a593Smuzhiyun *
7796*4882a593Smuzhiyun * Returns a negative error number or zero on success
7797*4882a593Smuzhiyun */
t4_free_encap_mac_filt(struct adapter * adap,unsigned int viid,int idx,bool sleep_ok)7798*4882a593Smuzhiyun int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
7799*4882a593Smuzhiyun int idx, bool sleep_ok)
7800*4882a593Smuzhiyun {
7801*4882a593Smuzhiyun struct fw_vi_mac_exact *p;
7802*4882a593Smuzhiyun u8 addr[] = {0, 0, 0, 0, 0, 0};
7803*4882a593Smuzhiyun struct fw_vi_mac_cmd c;
7804*4882a593Smuzhiyun int ret = 0;
7805*4882a593Smuzhiyun u32 exact;
7806*4882a593Smuzhiyun
7807*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
7808*4882a593Smuzhiyun c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7809*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7810*4882a593Smuzhiyun FW_CMD_EXEC_V(0) |
7811*4882a593Smuzhiyun FW_VI_MAC_CMD_VIID_V(viid));
7812*4882a593Smuzhiyun exact = FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC);
7813*4882a593Smuzhiyun c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7814*4882a593Smuzhiyun exact |
7815*4882a593Smuzhiyun FW_CMD_LEN16_V(1));
7816*4882a593Smuzhiyun p = c.u.exact;
7817*4882a593Smuzhiyun p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7818*4882a593Smuzhiyun FW_VI_MAC_CMD_IDX_V(idx));
7819*4882a593Smuzhiyun memcpy(p->macaddr, addr, sizeof(p->macaddr));
7820*4882a593Smuzhiyun ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7821*4882a593Smuzhiyun return ret;
7822*4882a593Smuzhiyun }
7823*4882a593Smuzhiyun
7824*4882a593Smuzhiyun /**
7825*4882a593Smuzhiyun * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
7826*4882a593Smuzhiyun * @adap: the adapter
7827*4882a593Smuzhiyun * @viid: the VI id
7828*4882a593Smuzhiyun * @addr: the MAC address
7829*4882a593Smuzhiyun * @mask: the mask
7830*4882a593Smuzhiyun * @idx: index of the entry in mps tcam
7831*4882a593Smuzhiyun * @lookup_type: MAC address for inner (1) or outer (0) header
7832*4882a593Smuzhiyun * @port_id: the port index
7833*4882a593Smuzhiyun * @sleep_ok: call is allowed to sleep
7834*4882a593Smuzhiyun *
7835*4882a593Smuzhiyun * Removes the mac entry at the specified index using raw mac interface.
7836*4882a593Smuzhiyun *
7837*4882a593Smuzhiyun * Returns a negative error number on failure.
7838*4882a593Smuzhiyun */
t4_free_raw_mac_filt(struct adapter * adap,unsigned int viid,const u8 * addr,const u8 * mask,unsigned int idx,u8 lookup_type,u8 port_id,bool sleep_ok)7839*4882a593Smuzhiyun int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
7840*4882a593Smuzhiyun const u8 *addr, const u8 *mask, unsigned int idx,
7841*4882a593Smuzhiyun u8 lookup_type, u8 port_id, bool sleep_ok)
7842*4882a593Smuzhiyun {
7843*4882a593Smuzhiyun struct fw_vi_mac_cmd c;
7844*4882a593Smuzhiyun struct fw_vi_mac_raw *p = &c.u.raw;
7845*4882a593Smuzhiyun u32 val;
7846*4882a593Smuzhiyun
7847*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
7848*4882a593Smuzhiyun c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7849*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7850*4882a593Smuzhiyun FW_CMD_EXEC_V(0) |
7851*4882a593Smuzhiyun FW_VI_MAC_CMD_VIID_V(viid));
7852*4882a593Smuzhiyun val = FW_CMD_LEN16_V(1) |
7853*4882a593Smuzhiyun FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
7854*4882a593Smuzhiyun c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7855*4882a593Smuzhiyun FW_CMD_LEN16_V(val));
7856*4882a593Smuzhiyun
7857*4882a593Smuzhiyun p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
7858*4882a593Smuzhiyun FW_VI_MAC_ID_BASED_FREE);
7859*4882a593Smuzhiyun
7860*4882a593Smuzhiyun /* Lookup Type. Outer header: 0, Inner header: 1 */
7861*4882a593Smuzhiyun p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
7862*4882a593Smuzhiyun DATAPORTNUM_V(port_id));
7863*4882a593Smuzhiyun /* Lookup mask and port mask */
7864*4882a593Smuzhiyun p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
7865*4882a593Smuzhiyun DATAPORTNUM_V(DATAPORTNUM_M));
7866*4882a593Smuzhiyun
7867*4882a593Smuzhiyun /* Copy the address and the mask */
7868*4882a593Smuzhiyun memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
7869*4882a593Smuzhiyun memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
7870*4882a593Smuzhiyun
7871*4882a593Smuzhiyun return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7872*4882a593Smuzhiyun }
7873*4882a593Smuzhiyun
7874*4882a593Smuzhiyun /**
7875*4882a593Smuzhiyun * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
7876*4882a593Smuzhiyun * @adap: the adapter
7877*4882a593Smuzhiyun * @viid: the VI id
7878*4882a593Smuzhiyun * @addr: the MAC address
7879*4882a593Smuzhiyun * @mask: the mask
7880*4882a593Smuzhiyun * @vni: the VNI id for the tunnel protocol
7881*4882a593Smuzhiyun * @vni_mask: mask for the VNI id
7882*4882a593Smuzhiyun * @dip_hit: to enable DIP match for the MPS entry
7883*4882a593Smuzhiyun * @lookup_type: MAC address for inner (1) or outer (0) header
7884*4882a593Smuzhiyun * @sleep_ok: call is allowed to sleep
7885*4882a593Smuzhiyun *
7886*4882a593Smuzhiyun * Allocates an MPS entry with specified MAC address and VNI value.
7887*4882a593Smuzhiyun *
7888*4882a593Smuzhiyun * Returns a negative error number or the allocated index for this mac.
7889*4882a593Smuzhiyun */
t4_alloc_encap_mac_filt(struct adapter * adap,unsigned int viid,const u8 * addr,const u8 * mask,unsigned int vni,unsigned int vni_mask,u8 dip_hit,u8 lookup_type,bool sleep_ok)7890*4882a593Smuzhiyun int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
7891*4882a593Smuzhiyun const u8 *addr, const u8 *mask, unsigned int vni,
7892*4882a593Smuzhiyun unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
7893*4882a593Smuzhiyun bool sleep_ok)
7894*4882a593Smuzhiyun {
7895*4882a593Smuzhiyun struct fw_vi_mac_cmd c;
7896*4882a593Smuzhiyun struct fw_vi_mac_vni *p = c.u.exact_vni;
7897*4882a593Smuzhiyun int ret = 0;
7898*4882a593Smuzhiyun u32 val;
7899*4882a593Smuzhiyun
7900*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
7901*4882a593Smuzhiyun c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7902*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7903*4882a593Smuzhiyun FW_VI_MAC_CMD_VIID_V(viid));
7904*4882a593Smuzhiyun val = FW_CMD_LEN16_V(1) |
7905*4882a593Smuzhiyun FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC_VNI);
7906*4882a593Smuzhiyun c.freemacs_to_len16 = cpu_to_be32(val);
7907*4882a593Smuzhiyun p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7908*4882a593Smuzhiyun FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
7909*4882a593Smuzhiyun memcpy(p->macaddr, addr, sizeof(p->macaddr));
7910*4882a593Smuzhiyun memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));
7911*4882a593Smuzhiyun
7912*4882a593Smuzhiyun p->lookup_type_to_vni =
7913*4882a593Smuzhiyun cpu_to_be32(FW_VI_MAC_CMD_VNI_V(vni) |
7914*4882a593Smuzhiyun FW_VI_MAC_CMD_DIP_HIT_V(dip_hit) |
7915*4882a593Smuzhiyun FW_VI_MAC_CMD_LOOKUP_TYPE_V(lookup_type));
7916*4882a593Smuzhiyun p->vni_mask_pkd = cpu_to_be32(FW_VI_MAC_CMD_VNI_MASK_V(vni_mask));
7917*4882a593Smuzhiyun ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7918*4882a593Smuzhiyun if (ret == 0)
7919*4882a593Smuzhiyun ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
7920*4882a593Smuzhiyun return ret;
7921*4882a593Smuzhiyun }
7922*4882a593Smuzhiyun
7923*4882a593Smuzhiyun /**
7924*4882a593Smuzhiyun * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
7925*4882a593Smuzhiyun * @adap: the adapter
7926*4882a593Smuzhiyun * @viid: the VI id
7927*4882a593Smuzhiyun * @addr: the MAC address
7928*4882a593Smuzhiyun * @mask: the mask
7929*4882a593Smuzhiyun * @idx: index at which to add this entry
7930*4882a593Smuzhiyun * @lookup_type: MAC address for inner (1) or outer (0) header
7931*4882a593Smuzhiyun * @port_id: the port index
7932*4882a593Smuzhiyun * @sleep_ok: call is allowed to sleep
7933*4882a593Smuzhiyun *
7934*4882a593Smuzhiyun * Adds the mac entry at the specified index using raw mac interface.
7935*4882a593Smuzhiyun *
7936*4882a593Smuzhiyun * Returns a negative error number or the allocated index for this mac.
7937*4882a593Smuzhiyun */
t4_alloc_raw_mac_filt(struct adapter * adap,unsigned int viid,const u8 * addr,const u8 * mask,unsigned int idx,u8 lookup_type,u8 port_id,bool sleep_ok)7938*4882a593Smuzhiyun int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
7939*4882a593Smuzhiyun const u8 *addr, const u8 *mask, unsigned int idx,
7940*4882a593Smuzhiyun u8 lookup_type, u8 port_id, bool sleep_ok)
7941*4882a593Smuzhiyun {
7942*4882a593Smuzhiyun int ret = 0;
7943*4882a593Smuzhiyun struct fw_vi_mac_cmd c;
7944*4882a593Smuzhiyun struct fw_vi_mac_raw *p = &c.u.raw;
7945*4882a593Smuzhiyun u32 val;
7946*4882a593Smuzhiyun
7947*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
7948*4882a593Smuzhiyun c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7949*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7950*4882a593Smuzhiyun FW_VI_MAC_CMD_VIID_V(viid));
7951*4882a593Smuzhiyun val = FW_CMD_LEN16_V(1) |
7952*4882a593Smuzhiyun FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
7953*4882a593Smuzhiyun c.freemacs_to_len16 = cpu_to_be32(val);
7954*4882a593Smuzhiyun
7955*4882a593Smuzhiyun /* Specify that this is an inner mac address */
7956*4882a593Smuzhiyun p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));
7957*4882a593Smuzhiyun
7958*4882a593Smuzhiyun /* Lookup Type. Outer header: 0, Inner header: 1 */
7959*4882a593Smuzhiyun p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
7960*4882a593Smuzhiyun DATAPORTNUM_V(port_id));
7961*4882a593Smuzhiyun /* Lookup mask and port mask */
7962*4882a593Smuzhiyun p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
7963*4882a593Smuzhiyun DATAPORTNUM_V(DATAPORTNUM_M));
7964*4882a593Smuzhiyun
7965*4882a593Smuzhiyun /* Copy the address and the mask */
7966*4882a593Smuzhiyun memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
7967*4882a593Smuzhiyun memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
7968*4882a593Smuzhiyun
7969*4882a593Smuzhiyun ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7970*4882a593Smuzhiyun if (ret == 0) {
7971*4882a593Smuzhiyun ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
7972*4882a593Smuzhiyun if (ret != idx)
7973*4882a593Smuzhiyun ret = -ENOMEM;
7974*4882a593Smuzhiyun }
7975*4882a593Smuzhiyun
7976*4882a593Smuzhiyun return ret;
7977*4882a593Smuzhiyun }
7978*4882a593Smuzhiyun
7979*4882a593Smuzhiyun /**
7980*4882a593Smuzhiyun * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7981*4882a593Smuzhiyun * @adap: the adapter
7982*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
7983*4882a593Smuzhiyun * @viid: the VI id
7984*4882a593Smuzhiyun * @free: if true any existing filters for this VI id are first removed
7985*4882a593Smuzhiyun * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7986*4882a593Smuzhiyun * @addr: the MAC address(es)
7987*4882a593Smuzhiyun * @idx: where to store the index of each allocated filter
7988*4882a593Smuzhiyun * @hash: pointer to hash address filter bitmap
7989*4882a593Smuzhiyun * @sleep_ok: call is allowed to sleep
7990*4882a593Smuzhiyun *
7991*4882a593Smuzhiyun * Allocates an exact-match filter for each of the supplied addresses and
7992*4882a593Smuzhiyun * sets it to the corresponding address. If @idx is not %NULL it should
7993*4882a593Smuzhiyun * have at least @naddr entries, each of which will be set to the index of
7994*4882a593Smuzhiyun * the filter allocated for the corresponding MAC address. If a filter
7995*4882a593Smuzhiyun * could not be allocated for an address its index is set to 0xffff.
7996*4882a593Smuzhiyun * If @hash is not %NULL addresses that fail to allocate an exact filter
7997*4882a593Smuzhiyun * are hashed and update the hash filter bitmap pointed at by @hash.
7998*4882a593Smuzhiyun *
7999*4882a593Smuzhiyun * Returns a negative error number or the number of filters allocated.
8000*4882a593Smuzhiyun */
t4_alloc_mac_filt(struct adapter * adap,unsigned int mbox,unsigned int viid,bool free,unsigned int naddr,const u8 ** addr,u16 * idx,u64 * hash,bool sleep_ok)8001*4882a593Smuzhiyun int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
8002*4882a593Smuzhiyun unsigned int viid, bool free, unsigned int naddr,
8003*4882a593Smuzhiyun const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
8004*4882a593Smuzhiyun {
8005*4882a593Smuzhiyun int offset, ret = 0;
8006*4882a593Smuzhiyun struct fw_vi_mac_cmd c;
8007*4882a593Smuzhiyun unsigned int nfilters = 0;
8008*4882a593Smuzhiyun unsigned int max_naddr = adap->params.arch.mps_tcam_size;
8009*4882a593Smuzhiyun unsigned int rem = naddr;
8010*4882a593Smuzhiyun
8011*4882a593Smuzhiyun if (naddr > max_naddr)
8012*4882a593Smuzhiyun return -EINVAL;
8013*4882a593Smuzhiyun
8014*4882a593Smuzhiyun for (offset = 0; offset < naddr ; /**/) {
8015*4882a593Smuzhiyun unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
8016*4882a593Smuzhiyun rem : ARRAY_SIZE(c.u.exact));
8017*4882a593Smuzhiyun size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8018*4882a593Smuzhiyun u.exact[fw_naddr]), 16);
8019*4882a593Smuzhiyun struct fw_vi_mac_exact *p;
8020*4882a593Smuzhiyun int i;
8021*4882a593Smuzhiyun
8022*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
8023*4882a593Smuzhiyun c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
8024*4882a593Smuzhiyun FW_CMD_REQUEST_F |
8025*4882a593Smuzhiyun FW_CMD_WRITE_F |
8026*4882a593Smuzhiyun FW_CMD_EXEC_V(free) |
8027*4882a593Smuzhiyun FW_VI_MAC_CMD_VIID_V(viid));
8028*4882a593Smuzhiyun c.freemacs_to_len16 =
8029*4882a593Smuzhiyun cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
8030*4882a593Smuzhiyun FW_CMD_LEN16_V(len16));
8031*4882a593Smuzhiyun
8032*4882a593Smuzhiyun for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8033*4882a593Smuzhiyun p->valid_to_idx =
8034*4882a593Smuzhiyun cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
8035*4882a593Smuzhiyun FW_VI_MAC_CMD_IDX_V(
8036*4882a593Smuzhiyun FW_VI_MAC_ADD_MAC));
8037*4882a593Smuzhiyun memcpy(p->macaddr, addr[offset + i],
8038*4882a593Smuzhiyun sizeof(p->macaddr));
8039*4882a593Smuzhiyun }
8040*4882a593Smuzhiyun
8041*4882a593Smuzhiyun /* It's okay if we run out of space in our MAC address arena.
8042*4882a593Smuzhiyun * Some of the addresses we submit may get stored so we need
8043*4882a593Smuzhiyun * to run through the reply to see what the results were ...
8044*4882a593Smuzhiyun */
8045*4882a593Smuzhiyun ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8046*4882a593Smuzhiyun if (ret && ret != -FW_ENOMEM)
8047*4882a593Smuzhiyun break;
8048*4882a593Smuzhiyun
8049*4882a593Smuzhiyun for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8050*4882a593Smuzhiyun u16 index = FW_VI_MAC_CMD_IDX_G(
8051*4882a593Smuzhiyun be16_to_cpu(p->valid_to_idx));
8052*4882a593Smuzhiyun
8053*4882a593Smuzhiyun if (idx)
8054*4882a593Smuzhiyun idx[offset + i] = (index >= max_naddr ?
8055*4882a593Smuzhiyun 0xffff : index);
8056*4882a593Smuzhiyun if (index < max_naddr)
8057*4882a593Smuzhiyun nfilters++;
8058*4882a593Smuzhiyun else if (hash)
8059*4882a593Smuzhiyun *hash |= (1ULL <<
8060*4882a593Smuzhiyun hash_mac_addr(addr[offset + i]));
8061*4882a593Smuzhiyun }
8062*4882a593Smuzhiyun
8063*4882a593Smuzhiyun free = false;
8064*4882a593Smuzhiyun offset += fw_naddr;
8065*4882a593Smuzhiyun rem -= fw_naddr;
8066*4882a593Smuzhiyun }
8067*4882a593Smuzhiyun
8068*4882a593Smuzhiyun if (ret == 0 || ret == -FW_ENOMEM)
8069*4882a593Smuzhiyun ret = nfilters;
8070*4882a593Smuzhiyun return ret;
8071*4882a593Smuzhiyun }
8072*4882a593Smuzhiyun
8073*4882a593Smuzhiyun /**
8074*4882a593Smuzhiyun * t4_free_mac_filt - frees exact-match filters of given MAC addresses
8075*4882a593Smuzhiyun * @adap: the adapter
8076*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
8077*4882a593Smuzhiyun * @viid: the VI id
8078*4882a593Smuzhiyun * @naddr: the number of MAC addresses to allocate filters for (up to 7)
8079*4882a593Smuzhiyun * @addr: the MAC address(es)
8080*4882a593Smuzhiyun * @sleep_ok: call is allowed to sleep
8081*4882a593Smuzhiyun *
8082*4882a593Smuzhiyun * Frees the exact-match filter for each of the supplied addresses
8083*4882a593Smuzhiyun *
8084*4882a593Smuzhiyun * Returns a negative error number or the number of filters freed.
8085*4882a593Smuzhiyun */
t4_free_mac_filt(struct adapter * adap,unsigned int mbox,unsigned int viid,unsigned int naddr,const u8 ** addr,bool sleep_ok)8086*4882a593Smuzhiyun int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
8087*4882a593Smuzhiyun unsigned int viid, unsigned int naddr,
8088*4882a593Smuzhiyun const u8 **addr, bool sleep_ok)
8089*4882a593Smuzhiyun {
8090*4882a593Smuzhiyun int offset, ret = 0;
8091*4882a593Smuzhiyun struct fw_vi_mac_cmd c;
8092*4882a593Smuzhiyun unsigned int nfilters = 0;
8093*4882a593Smuzhiyun unsigned int max_naddr = is_t4(adap->params.chip) ?
8094*4882a593Smuzhiyun NUM_MPS_CLS_SRAM_L_INSTANCES :
8095*4882a593Smuzhiyun NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8096*4882a593Smuzhiyun unsigned int rem = naddr;
8097*4882a593Smuzhiyun
8098*4882a593Smuzhiyun if (naddr > max_naddr)
8099*4882a593Smuzhiyun return -EINVAL;
8100*4882a593Smuzhiyun
8101*4882a593Smuzhiyun for (offset = 0; offset < (int)naddr ; /**/) {
8102*4882a593Smuzhiyun unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
8103*4882a593Smuzhiyun ? rem
8104*4882a593Smuzhiyun : ARRAY_SIZE(c.u.exact));
8105*4882a593Smuzhiyun size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
8106*4882a593Smuzhiyun u.exact[fw_naddr]), 16);
8107*4882a593Smuzhiyun struct fw_vi_mac_exact *p;
8108*4882a593Smuzhiyun int i;
8109*4882a593Smuzhiyun
8110*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
8111*4882a593Smuzhiyun c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
8112*4882a593Smuzhiyun FW_CMD_REQUEST_F |
8113*4882a593Smuzhiyun FW_CMD_WRITE_F |
8114*4882a593Smuzhiyun FW_CMD_EXEC_V(0) |
8115*4882a593Smuzhiyun FW_VI_MAC_CMD_VIID_V(viid));
8116*4882a593Smuzhiyun c.freemacs_to_len16 =
8117*4882a593Smuzhiyun cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
8118*4882a593Smuzhiyun FW_CMD_LEN16_V(len16));
8119*4882a593Smuzhiyun
8120*4882a593Smuzhiyun for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
8121*4882a593Smuzhiyun p->valid_to_idx = cpu_to_be16(
8122*4882a593Smuzhiyun FW_VI_MAC_CMD_VALID_F |
8123*4882a593Smuzhiyun FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
8124*4882a593Smuzhiyun memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
8125*4882a593Smuzhiyun }
8126*4882a593Smuzhiyun
8127*4882a593Smuzhiyun ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
8128*4882a593Smuzhiyun if (ret)
8129*4882a593Smuzhiyun break;
8130*4882a593Smuzhiyun
8131*4882a593Smuzhiyun for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
8132*4882a593Smuzhiyun u16 index = FW_VI_MAC_CMD_IDX_G(
8133*4882a593Smuzhiyun be16_to_cpu(p->valid_to_idx));
8134*4882a593Smuzhiyun
8135*4882a593Smuzhiyun if (index < max_naddr)
8136*4882a593Smuzhiyun nfilters++;
8137*4882a593Smuzhiyun }
8138*4882a593Smuzhiyun
8139*4882a593Smuzhiyun offset += fw_naddr;
8140*4882a593Smuzhiyun rem -= fw_naddr;
8141*4882a593Smuzhiyun }
8142*4882a593Smuzhiyun
8143*4882a593Smuzhiyun if (ret == 0)
8144*4882a593Smuzhiyun ret = nfilters;
8145*4882a593Smuzhiyun return ret;
8146*4882a593Smuzhiyun }
8147*4882a593Smuzhiyun
/**
 * t4_change_mac - modifies the exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @smt_idx: the destination to store the new SMT index.
 *
 * Modifies an exact-match filter and sets it to the new MAC address.
 * Note that in general it is not possible to modify the value of a given
 * filter so the generic way to modify an address filter is to free the one
 * being used by the old address value and allocate a new filter for the
 * new address value. @idx can be -1 if the address is a new addition.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, u8 *smt_idx)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;

	if (idx < 0) /* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	/* Only request an SMT entry from the firmware when the caller wants
	 * the resulting SMT index back.
	 */
	mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	/* Build a single-entry exact-match write command for the VI. */
	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
				   FW_VI_MAC_CMD_VIID_V(viid));
	c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
				      FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
				      FW_VI_MAC_CMD_IDX_V(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		/* The firmware echoes back the index of the exact-match
		 * filter it actually used; an index at or beyond the MPS
		 * TCAM size indicates the allocation failed.
		 */
		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
		if (smt_idx) {
			if (adap->params.viid_smt_extn_support) {
				/* Newer firmware reports the SMT index
				 * directly in the reply.
				 */
				*smt_idx = FW_VI_MAC_CMD_SMTID_G
						(be32_to_cpu(c.op_to_viid));
			} else {
				/* In T4/T5, SMT contains 256 SMAC entries
				 * organized in 128 rows of 2 entries each.
				 * In T6, SMT contains 256 SMAC entries in
				 * 256 rows.
				 */
				if (CHELSIO_CHIP_VERSION(adap->params.chip) <=
				    CHELSIO_T5)
					*smt_idx = (viid & FW_VIID_VIN_M) << 1;
				else
					*smt_idx = (viid & FW_VIID_VIN_M);
			}
		}
	}
	return ret;
}
8214*4882a593Smuzhiyun
8215*4882a593Smuzhiyun /**
8216*4882a593Smuzhiyun * t4_set_addr_hash - program the MAC inexact-match hash filter
8217*4882a593Smuzhiyun * @adap: the adapter
8218*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
8219*4882a593Smuzhiyun * @viid: the VI id
8220*4882a593Smuzhiyun * @ucast: whether the hash filter should also match unicast addresses
8221*4882a593Smuzhiyun * @vec: the value to be written to the hash filter
8222*4882a593Smuzhiyun * @sleep_ok: call is allowed to sleep
8223*4882a593Smuzhiyun *
8224*4882a593Smuzhiyun * Sets the 64-bit inexact-match hash filter for a virtual interface.
8225*4882a593Smuzhiyun */
t4_set_addr_hash(struct adapter * adap,unsigned int mbox,unsigned int viid,bool ucast,u64 vec,bool sleep_ok)8226*4882a593Smuzhiyun int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
8227*4882a593Smuzhiyun bool ucast, u64 vec, bool sleep_ok)
8228*4882a593Smuzhiyun {
8229*4882a593Smuzhiyun struct fw_vi_mac_cmd c;
8230*4882a593Smuzhiyun
8231*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
8232*4882a593Smuzhiyun c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
8233*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
8234*4882a593Smuzhiyun FW_VI_ENABLE_CMD_VIID_V(viid));
8235*4882a593Smuzhiyun c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
8236*4882a593Smuzhiyun FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
8237*4882a593Smuzhiyun FW_CMD_LEN16_V(1));
8238*4882a593Smuzhiyun c.u.hash.hashvec = cpu_to_be64(vec);
8239*4882a593Smuzhiyun return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8240*4882a593Smuzhiyun }
8241*4882a593Smuzhiyun
8242*4882a593Smuzhiyun /**
8243*4882a593Smuzhiyun * t4_enable_vi_params - enable/disable a virtual interface
8244*4882a593Smuzhiyun * @adap: the adapter
8245*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
8246*4882a593Smuzhiyun * @viid: the VI id
8247*4882a593Smuzhiyun * @rx_en: 1=enable Rx, 0=disable Rx
8248*4882a593Smuzhiyun * @tx_en: 1=enable Tx, 0=disable Tx
8249*4882a593Smuzhiyun * @dcb_en: 1=enable delivery of Data Center Bridging messages.
8250*4882a593Smuzhiyun *
8251*4882a593Smuzhiyun * Enables/disables a virtual interface. Note that setting DCB Enable
8252*4882a593Smuzhiyun * only makes sense when enabling a Virtual Interface ...
8253*4882a593Smuzhiyun */
t4_enable_vi_params(struct adapter * adap,unsigned int mbox,unsigned int viid,bool rx_en,bool tx_en,bool dcb_en)8254*4882a593Smuzhiyun int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
8255*4882a593Smuzhiyun unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
8256*4882a593Smuzhiyun {
8257*4882a593Smuzhiyun struct fw_vi_enable_cmd c;
8258*4882a593Smuzhiyun
8259*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
8260*4882a593Smuzhiyun c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
8261*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8262*4882a593Smuzhiyun FW_VI_ENABLE_CMD_VIID_V(viid));
8263*4882a593Smuzhiyun c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
8264*4882a593Smuzhiyun FW_VI_ENABLE_CMD_EEN_V(tx_en) |
8265*4882a593Smuzhiyun FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
8266*4882a593Smuzhiyun FW_LEN16(c));
8267*4882a593Smuzhiyun return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
8268*4882a593Smuzhiyun }
8269*4882a593Smuzhiyun
8270*4882a593Smuzhiyun /**
8271*4882a593Smuzhiyun * t4_enable_vi - enable/disable a virtual interface
8272*4882a593Smuzhiyun * @adap: the adapter
8273*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
8274*4882a593Smuzhiyun * @viid: the VI id
8275*4882a593Smuzhiyun * @rx_en: 1=enable Rx, 0=disable Rx
8276*4882a593Smuzhiyun * @tx_en: 1=enable Tx, 0=disable Tx
8277*4882a593Smuzhiyun *
8278*4882a593Smuzhiyun * Enables/disables a virtual interface.
8279*4882a593Smuzhiyun */
t4_enable_vi(struct adapter * adap,unsigned int mbox,unsigned int viid,bool rx_en,bool tx_en)8280*4882a593Smuzhiyun int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
8281*4882a593Smuzhiyun bool rx_en, bool tx_en)
8282*4882a593Smuzhiyun {
8283*4882a593Smuzhiyun return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
8284*4882a593Smuzhiyun }
8285*4882a593Smuzhiyun
8286*4882a593Smuzhiyun /**
8287*4882a593Smuzhiyun * t4_enable_pi_params - enable/disable a Port's Virtual Interface
8288*4882a593Smuzhiyun * @adap: the adapter
8289*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
8290*4882a593Smuzhiyun * @pi: the Port Information structure
8291*4882a593Smuzhiyun * @rx_en: 1=enable Rx, 0=disable Rx
8292*4882a593Smuzhiyun * @tx_en: 1=enable Tx, 0=disable Tx
8293*4882a593Smuzhiyun * @dcb_en: 1=enable delivery of Data Center Bridging messages.
8294*4882a593Smuzhiyun *
8295*4882a593Smuzhiyun * Enables/disables a Port's Virtual Interface. Note that setting DCB
8296*4882a593Smuzhiyun * Enable only makes sense when enabling a Virtual Interface ...
8297*4882a593Smuzhiyun * If the Virtual Interface enable/disable operation is successful,
8298*4882a593Smuzhiyun * we notify the OS-specific code of a potential Link Status change
8299*4882a593Smuzhiyun * via the OS Contract API t4_os_link_changed().
8300*4882a593Smuzhiyun */
t4_enable_pi_params(struct adapter * adap,unsigned int mbox,struct port_info * pi,bool rx_en,bool tx_en,bool dcb_en)8301*4882a593Smuzhiyun int t4_enable_pi_params(struct adapter *adap, unsigned int mbox,
8302*4882a593Smuzhiyun struct port_info *pi,
8303*4882a593Smuzhiyun bool rx_en, bool tx_en, bool dcb_en)
8304*4882a593Smuzhiyun {
8305*4882a593Smuzhiyun int ret = t4_enable_vi_params(adap, mbox, pi->viid,
8306*4882a593Smuzhiyun rx_en, tx_en, dcb_en);
8307*4882a593Smuzhiyun if (ret)
8308*4882a593Smuzhiyun return ret;
8309*4882a593Smuzhiyun t4_os_link_changed(adap, pi->port_id,
8310*4882a593Smuzhiyun rx_en && tx_en && pi->link_cfg.link_ok);
8311*4882a593Smuzhiyun return 0;
8312*4882a593Smuzhiyun }
8313*4882a593Smuzhiyun
8314*4882a593Smuzhiyun /**
8315*4882a593Smuzhiyun * t4_identify_port - identify a VI's port by blinking its LED
8316*4882a593Smuzhiyun * @adap: the adapter
8317*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
8318*4882a593Smuzhiyun * @viid: the VI id
8319*4882a593Smuzhiyun * @nblinks: how many times to blink LED at 2.5 Hz
8320*4882a593Smuzhiyun *
8321*4882a593Smuzhiyun * Identifies a VI's port by blinking its LED.
8322*4882a593Smuzhiyun */
t4_identify_port(struct adapter * adap,unsigned int mbox,unsigned int viid,unsigned int nblinks)8323*4882a593Smuzhiyun int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
8324*4882a593Smuzhiyun unsigned int nblinks)
8325*4882a593Smuzhiyun {
8326*4882a593Smuzhiyun struct fw_vi_enable_cmd c;
8327*4882a593Smuzhiyun
8328*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
8329*4882a593Smuzhiyun c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
8330*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8331*4882a593Smuzhiyun FW_VI_ENABLE_CMD_VIID_V(viid));
8332*4882a593Smuzhiyun c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
8333*4882a593Smuzhiyun c.blinkdur = cpu_to_be16(nblinks);
8334*4882a593Smuzhiyun return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8335*4882a593Smuzhiyun }
8336*4882a593Smuzhiyun
8337*4882a593Smuzhiyun /**
8338*4882a593Smuzhiyun * t4_iq_stop - stop an ingress queue and its FLs
8339*4882a593Smuzhiyun * @adap: the adapter
8340*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
8341*4882a593Smuzhiyun * @pf: the PF owning the queues
8342*4882a593Smuzhiyun * @vf: the VF owning the queues
8343*4882a593Smuzhiyun * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8344*4882a593Smuzhiyun * @iqid: ingress queue id
8345*4882a593Smuzhiyun * @fl0id: FL0 queue id or 0xffff if no attached FL0
8346*4882a593Smuzhiyun * @fl1id: FL1 queue id or 0xffff if no attached FL1
8347*4882a593Smuzhiyun *
8348*4882a593Smuzhiyun * Stops an ingress queue and its associated FLs, if any. This causes
8349*4882a593Smuzhiyun * any current or future data/messages destined for these queues to be
8350*4882a593Smuzhiyun * tossed.
8351*4882a593Smuzhiyun */
t4_iq_stop(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int iqtype,unsigned int iqid,unsigned int fl0id,unsigned int fl1id)8352*4882a593Smuzhiyun int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
8353*4882a593Smuzhiyun unsigned int vf, unsigned int iqtype, unsigned int iqid,
8354*4882a593Smuzhiyun unsigned int fl0id, unsigned int fl1id)
8355*4882a593Smuzhiyun {
8356*4882a593Smuzhiyun struct fw_iq_cmd c;
8357*4882a593Smuzhiyun
8358*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
8359*4882a593Smuzhiyun c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
8360*4882a593Smuzhiyun FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
8361*4882a593Smuzhiyun FW_IQ_CMD_VFN_V(vf));
8362*4882a593Smuzhiyun c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
8363*4882a593Smuzhiyun c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
8364*4882a593Smuzhiyun c.iqid = cpu_to_be16(iqid);
8365*4882a593Smuzhiyun c.fl0id = cpu_to_be16(fl0id);
8366*4882a593Smuzhiyun c.fl1id = cpu_to_be16(fl1id);
8367*4882a593Smuzhiyun return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8368*4882a593Smuzhiyun }
8369*4882a593Smuzhiyun
8370*4882a593Smuzhiyun /**
8371*4882a593Smuzhiyun * t4_iq_free - free an ingress queue and its FLs
8372*4882a593Smuzhiyun * @adap: the adapter
8373*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
8374*4882a593Smuzhiyun * @pf: the PF owning the queues
8375*4882a593Smuzhiyun * @vf: the VF owning the queues
8376*4882a593Smuzhiyun * @iqtype: the ingress queue type
8377*4882a593Smuzhiyun * @iqid: ingress queue id
8378*4882a593Smuzhiyun * @fl0id: FL0 queue id or 0xffff if no attached FL0
8379*4882a593Smuzhiyun * @fl1id: FL1 queue id or 0xffff if no attached FL1
8380*4882a593Smuzhiyun *
8381*4882a593Smuzhiyun * Frees an ingress queue and its associated FLs, if any.
8382*4882a593Smuzhiyun */
t4_iq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int iqtype,unsigned int iqid,unsigned int fl0id,unsigned int fl1id)8383*4882a593Smuzhiyun int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8384*4882a593Smuzhiyun unsigned int vf, unsigned int iqtype, unsigned int iqid,
8385*4882a593Smuzhiyun unsigned int fl0id, unsigned int fl1id)
8386*4882a593Smuzhiyun {
8387*4882a593Smuzhiyun struct fw_iq_cmd c;
8388*4882a593Smuzhiyun
8389*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
8390*4882a593Smuzhiyun c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
8391*4882a593Smuzhiyun FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
8392*4882a593Smuzhiyun FW_IQ_CMD_VFN_V(vf));
8393*4882a593Smuzhiyun c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
8394*4882a593Smuzhiyun c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
8395*4882a593Smuzhiyun c.iqid = cpu_to_be16(iqid);
8396*4882a593Smuzhiyun c.fl0id = cpu_to_be16(fl0id);
8397*4882a593Smuzhiyun c.fl1id = cpu_to_be16(fl1id);
8398*4882a593Smuzhiyun return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8399*4882a593Smuzhiyun }
8400*4882a593Smuzhiyun
8401*4882a593Smuzhiyun /**
8402*4882a593Smuzhiyun * t4_eth_eq_free - free an Ethernet egress queue
8403*4882a593Smuzhiyun * @adap: the adapter
8404*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
8405*4882a593Smuzhiyun * @pf: the PF owning the queue
8406*4882a593Smuzhiyun * @vf: the VF owning the queue
8407*4882a593Smuzhiyun * @eqid: egress queue id
8408*4882a593Smuzhiyun *
8409*4882a593Smuzhiyun * Frees an Ethernet egress queue.
8410*4882a593Smuzhiyun */
t4_eth_eq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int eqid)8411*4882a593Smuzhiyun int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8412*4882a593Smuzhiyun unsigned int vf, unsigned int eqid)
8413*4882a593Smuzhiyun {
8414*4882a593Smuzhiyun struct fw_eq_eth_cmd c;
8415*4882a593Smuzhiyun
8416*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
8417*4882a593Smuzhiyun c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
8418*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8419*4882a593Smuzhiyun FW_EQ_ETH_CMD_PFN_V(pf) |
8420*4882a593Smuzhiyun FW_EQ_ETH_CMD_VFN_V(vf));
8421*4882a593Smuzhiyun c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
8422*4882a593Smuzhiyun c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
8423*4882a593Smuzhiyun return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8424*4882a593Smuzhiyun }
8425*4882a593Smuzhiyun
8426*4882a593Smuzhiyun /**
8427*4882a593Smuzhiyun * t4_ctrl_eq_free - free a control egress queue
8428*4882a593Smuzhiyun * @adap: the adapter
8429*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
8430*4882a593Smuzhiyun * @pf: the PF owning the queue
8431*4882a593Smuzhiyun * @vf: the VF owning the queue
8432*4882a593Smuzhiyun * @eqid: egress queue id
8433*4882a593Smuzhiyun *
8434*4882a593Smuzhiyun * Frees a control egress queue.
8435*4882a593Smuzhiyun */
t4_ctrl_eq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int eqid)8436*4882a593Smuzhiyun int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8437*4882a593Smuzhiyun unsigned int vf, unsigned int eqid)
8438*4882a593Smuzhiyun {
8439*4882a593Smuzhiyun struct fw_eq_ctrl_cmd c;
8440*4882a593Smuzhiyun
8441*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
8442*4882a593Smuzhiyun c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
8443*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8444*4882a593Smuzhiyun FW_EQ_CTRL_CMD_PFN_V(pf) |
8445*4882a593Smuzhiyun FW_EQ_CTRL_CMD_VFN_V(vf));
8446*4882a593Smuzhiyun c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
8447*4882a593Smuzhiyun c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
8448*4882a593Smuzhiyun return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8449*4882a593Smuzhiyun }
8450*4882a593Smuzhiyun
8451*4882a593Smuzhiyun /**
8452*4882a593Smuzhiyun * t4_ofld_eq_free - free an offload egress queue
8453*4882a593Smuzhiyun * @adap: the adapter
8454*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
8455*4882a593Smuzhiyun * @pf: the PF owning the queue
8456*4882a593Smuzhiyun * @vf: the VF owning the queue
8457*4882a593Smuzhiyun * @eqid: egress queue id
8458*4882a593Smuzhiyun *
8459*4882a593Smuzhiyun * Frees a control egress queue.
8460*4882a593Smuzhiyun */
t4_ofld_eq_free(struct adapter * adap,unsigned int mbox,unsigned int pf,unsigned int vf,unsigned int eqid)8461*4882a593Smuzhiyun int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8462*4882a593Smuzhiyun unsigned int vf, unsigned int eqid)
8463*4882a593Smuzhiyun {
8464*4882a593Smuzhiyun struct fw_eq_ofld_cmd c;
8465*4882a593Smuzhiyun
8466*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
8467*4882a593Smuzhiyun c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
8468*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8469*4882a593Smuzhiyun FW_EQ_OFLD_CMD_PFN_V(pf) |
8470*4882a593Smuzhiyun FW_EQ_OFLD_CMD_VFN_V(vf));
8471*4882a593Smuzhiyun c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
8472*4882a593Smuzhiyun c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
8473*4882a593Smuzhiyun return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8474*4882a593Smuzhiyun }
8475*4882a593Smuzhiyun
/**
 * t4_link_down_rc_str - return a string for a Link Down Reason Code
 * @link_down_rc: Link Down Reason Code
 *
 * Returns a string representation of the Link Down Reason Code, or
 * "Bad Reason Code" for out-of-range values.
 */
static const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	/* Indexed directly by the firmware's Link Down Reason Code value. */
	static const char * const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};
	const size_t nreasons = sizeof(reason) / sizeof(reason[0]);

	return (link_down_rc < nreasons) ? reason[link_down_rc]
					 : "Bad Reason Code";
}
8500*4882a593Smuzhiyun
8501*4882a593Smuzhiyun /* Return the highest speed set in the port capabilities, in Mb/s. */
fwcap_to_speed(fw_port_cap32_t caps)8502*4882a593Smuzhiyun static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
8503*4882a593Smuzhiyun {
8504*4882a593Smuzhiyun #define TEST_SPEED_RETURN(__caps_speed, __speed) \
8505*4882a593Smuzhiyun do { \
8506*4882a593Smuzhiyun if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8507*4882a593Smuzhiyun return __speed; \
8508*4882a593Smuzhiyun } while (0)
8509*4882a593Smuzhiyun
8510*4882a593Smuzhiyun TEST_SPEED_RETURN(400G, 400000);
8511*4882a593Smuzhiyun TEST_SPEED_RETURN(200G, 200000);
8512*4882a593Smuzhiyun TEST_SPEED_RETURN(100G, 100000);
8513*4882a593Smuzhiyun TEST_SPEED_RETURN(50G, 50000);
8514*4882a593Smuzhiyun TEST_SPEED_RETURN(40G, 40000);
8515*4882a593Smuzhiyun TEST_SPEED_RETURN(25G, 25000);
8516*4882a593Smuzhiyun TEST_SPEED_RETURN(10G, 10000);
8517*4882a593Smuzhiyun TEST_SPEED_RETURN(1G, 1000);
8518*4882a593Smuzhiyun TEST_SPEED_RETURN(100M, 100);
8519*4882a593Smuzhiyun
8520*4882a593Smuzhiyun #undef TEST_SPEED_RETURN
8521*4882a593Smuzhiyun
8522*4882a593Smuzhiyun return 0;
8523*4882a593Smuzhiyun }
8524*4882a593Smuzhiyun
8525*4882a593Smuzhiyun /**
8526*4882a593Smuzhiyun * fwcap_to_fwspeed - return highest speed in Port Capabilities
8527*4882a593Smuzhiyun * @acaps: advertised Port Capabilities
8528*4882a593Smuzhiyun *
8529*4882a593Smuzhiyun * Get the highest speed for the port from the advertised Port
8530*4882a593Smuzhiyun * Capabilities. It will be either the highest speed from the list of
8531*4882a593Smuzhiyun * speeds or whatever user has set using ethtool.
8532*4882a593Smuzhiyun */
fwcap_to_fwspeed(fw_port_cap32_t acaps)8533*4882a593Smuzhiyun static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
8534*4882a593Smuzhiyun {
8535*4882a593Smuzhiyun #define TEST_SPEED_RETURN(__caps_speed) \
8536*4882a593Smuzhiyun do { \
8537*4882a593Smuzhiyun if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8538*4882a593Smuzhiyun return FW_PORT_CAP32_SPEED_##__caps_speed; \
8539*4882a593Smuzhiyun } while (0)
8540*4882a593Smuzhiyun
8541*4882a593Smuzhiyun TEST_SPEED_RETURN(400G);
8542*4882a593Smuzhiyun TEST_SPEED_RETURN(200G);
8543*4882a593Smuzhiyun TEST_SPEED_RETURN(100G);
8544*4882a593Smuzhiyun TEST_SPEED_RETURN(50G);
8545*4882a593Smuzhiyun TEST_SPEED_RETURN(40G);
8546*4882a593Smuzhiyun TEST_SPEED_RETURN(25G);
8547*4882a593Smuzhiyun TEST_SPEED_RETURN(10G);
8548*4882a593Smuzhiyun TEST_SPEED_RETURN(1G);
8549*4882a593Smuzhiyun TEST_SPEED_RETURN(100M);
8550*4882a593Smuzhiyun
8551*4882a593Smuzhiyun #undef TEST_SPEED_RETURN
8552*4882a593Smuzhiyun
8553*4882a593Smuzhiyun return 0;
8554*4882a593Smuzhiyun }
8555*4882a593Smuzhiyun
8556*4882a593Smuzhiyun /**
8557*4882a593Smuzhiyun * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
8558*4882a593Smuzhiyun * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
8559*4882a593Smuzhiyun *
8560*4882a593Smuzhiyun * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
8561*4882a593Smuzhiyun * 32-bit Port Capabilities value.
8562*4882a593Smuzhiyun */
lstatus_to_fwcap(u32 lstatus)8563*4882a593Smuzhiyun static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
8564*4882a593Smuzhiyun {
8565*4882a593Smuzhiyun fw_port_cap32_t linkattr = 0;
8566*4882a593Smuzhiyun
8567*4882a593Smuzhiyun /* Unfortunately the format of the Link Status in the old
8568*4882a593Smuzhiyun * 16-bit Port Information message isn't the same as the
8569*4882a593Smuzhiyun * 16-bit Port Capabilities bitfield used everywhere else ...
8570*4882a593Smuzhiyun */
8571*4882a593Smuzhiyun if (lstatus & FW_PORT_CMD_RXPAUSE_F)
8572*4882a593Smuzhiyun linkattr |= FW_PORT_CAP32_FC_RX;
8573*4882a593Smuzhiyun if (lstatus & FW_PORT_CMD_TXPAUSE_F)
8574*4882a593Smuzhiyun linkattr |= FW_PORT_CAP32_FC_TX;
8575*4882a593Smuzhiyun if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
8576*4882a593Smuzhiyun linkattr |= FW_PORT_CAP32_SPEED_100M;
8577*4882a593Smuzhiyun if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
8578*4882a593Smuzhiyun linkattr |= FW_PORT_CAP32_SPEED_1G;
8579*4882a593Smuzhiyun if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
8580*4882a593Smuzhiyun linkattr |= FW_PORT_CAP32_SPEED_10G;
8581*4882a593Smuzhiyun if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
8582*4882a593Smuzhiyun linkattr |= FW_PORT_CAP32_SPEED_25G;
8583*4882a593Smuzhiyun if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
8584*4882a593Smuzhiyun linkattr |= FW_PORT_CAP32_SPEED_40G;
8585*4882a593Smuzhiyun if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
8586*4882a593Smuzhiyun linkattr |= FW_PORT_CAP32_SPEED_100G;
8587*4882a593Smuzhiyun
8588*4882a593Smuzhiyun return linkattr;
8589*4882a593Smuzhiyun }
8590*4882a593Smuzhiyun
8591*4882a593Smuzhiyun /**
8592*4882a593Smuzhiyun * t4_handle_get_port_info - process a FW reply message
8593*4882a593Smuzhiyun * @pi: the port info
8594*4882a593Smuzhiyun * @rpl: start of the FW message
8595*4882a593Smuzhiyun *
8596*4882a593Smuzhiyun * Processes a GET_PORT_INFO FW reply message.
8597*4882a593Smuzhiyun */
t4_handle_get_port_info(struct port_info * pi,const __be64 * rpl)8598*4882a593Smuzhiyun void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
8599*4882a593Smuzhiyun {
8600*4882a593Smuzhiyun const struct fw_port_cmd *cmd = (const void *)rpl;
8601*4882a593Smuzhiyun fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
8602*4882a593Smuzhiyun struct link_config *lc = &pi->link_cfg;
8603*4882a593Smuzhiyun struct adapter *adapter = pi->adapter;
8604*4882a593Smuzhiyun unsigned int speed, fc, fec, adv_fc;
8605*4882a593Smuzhiyun enum fw_port_module_type mod_type;
8606*4882a593Smuzhiyun int action, link_ok, linkdnrc;
8607*4882a593Smuzhiyun enum fw_port_type port_type;
8608*4882a593Smuzhiyun
8609*4882a593Smuzhiyun /* Extract the various fields from the Port Information message.
8610*4882a593Smuzhiyun */
8611*4882a593Smuzhiyun action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
8612*4882a593Smuzhiyun switch (action) {
8613*4882a593Smuzhiyun case FW_PORT_ACTION_GET_PORT_INFO: {
8614*4882a593Smuzhiyun u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
8615*4882a593Smuzhiyun
8616*4882a593Smuzhiyun link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0;
8617*4882a593Smuzhiyun linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus);
8618*4882a593Smuzhiyun port_type = FW_PORT_CMD_PTYPE_G(lstatus);
8619*4882a593Smuzhiyun mod_type = FW_PORT_CMD_MODTYPE_G(lstatus);
8620*4882a593Smuzhiyun pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
8621*4882a593Smuzhiyun acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
8622*4882a593Smuzhiyun lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
8623*4882a593Smuzhiyun linkattr = lstatus_to_fwcap(lstatus);
8624*4882a593Smuzhiyun break;
8625*4882a593Smuzhiyun }
8626*4882a593Smuzhiyun
8627*4882a593Smuzhiyun case FW_PORT_ACTION_GET_PORT_INFO32: {
8628*4882a593Smuzhiyun u32 lstatus32;
8629*4882a593Smuzhiyun
8630*4882a593Smuzhiyun lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
8631*4882a593Smuzhiyun link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0;
8632*4882a593Smuzhiyun linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32);
8633*4882a593Smuzhiyun port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
8634*4882a593Smuzhiyun mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32);
8635*4882a593Smuzhiyun pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
8636*4882a593Smuzhiyun acaps = be32_to_cpu(cmd->u.info32.acaps32);
8637*4882a593Smuzhiyun lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
8638*4882a593Smuzhiyun linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
8639*4882a593Smuzhiyun break;
8640*4882a593Smuzhiyun }
8641*4882a593Smuzhiyun
8642*4882a593Smuzhiyun default:
8643*4882a593Smuzhiyun dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n",
8644*4882a593Smuzhiyun be32_to_cpu(cmd->action_to_len16));
8645*4882a593Smuzhiyun return;
8646*4882a593Smuzhiyun }
8647*4882a593Smuzhiyun
8648*4882a593Smuzhiyun fec = fwcap_to_cc_fec(acaps);
8649*4882a593Smuzhiyun adv_fc = fwcap_to_cc_pause(acaps);
8650*4882a593Smuzhiyun fc = fwcap_to_cc_pause(linkattr);
8651*4882a593Smuzhiyun speed = fwcap_to_speed(linkattr);
8652*4882a593Smuzhiyun
8653*4882a593Smuzhiyun /* Reset state for communicating new Transceiver Module status and
8654*4882a593Smuzhiyun * whether the OS-dependent layer wants us to redo the current
8655*4882a593Smuzhiyun * "sticky" L1 Configure Link Parameters.
8656*4882a593Smuzhiyun */
8657*4882a593Smuzhiyun lc->new_module = false;
8658*4882a593Smuzhiyun lc->redo_l1cfg = false;
8659*4882a593Smuzhiyun
8660*4882a593Smuzhiyun if (mod_type != pi->mod_type) {
8661*4882a593Smuzhiyun /* With the newer SFP28 and QSFP28 Transceiver Module Types,
8662*4882a593Smuzhiyun * various fundamental Port Capabilities which used to be
8663*4882a593Smuzhiyun * immutable can now change radically. We can now have
8664*4882a593Smuzhiyun * Speeds, Auto-Negotiation, Forward Error Correction, etc.
8665*4882a593Smuzhiyun * all change based on what Transceiver Module is inserted.
8666*4882a593Smuzhiyun * So we need to record the Physical "Port" Capabilities on
8667*4882a593Smuzhiyun * every Transceiver Module change.
8668*4882a593Smuzhiyun */
8669*4882a593Smuzhiyun lc->pcaps = pcaps;
8670*4882a593Smuzhiyun
8671*4882a593Smuzhiyun /* When a new Transceiver Module is inserted, the Firmware
8672*4882a593Smuzhiyun * will examine its i2c EPROM to determine its type and
8673*4882a593Smuzhiyun * general operating parameters including things like Forward
8674*4882a593Smuzhiyun * Error Control, etc. Various IEEE 802.3 standards dictate
8675*4882a593Smuzhiyun * how to interpret these i2c values to determine default
 * "automatic" settings. We record these for future use when
8677*4882a593Smuzhiyun * the user explicitly requests these standards-based values.
8678*4882a593Smuzhiyun */
8679*4882a593Smuzhiyun lc->def_acaps = acaps;
8680*4882a593Smuzhiyun
8681*4882a593Smuzhiyun /* Some versions of the early T6 Firmware "cheated" when
8682*4882a593Smuzhiyun * handling different Transceiver Modules by changing the
 * underlying Port Type reported to the Host Drivers. As
8684*4882a593Smuzhiyun * such we need to capture whatever Port Type the Firmware
8685*4882a593Smuzhiyun * sends us and record it in case it's different from what we
8686*4882a593Smuzhiyun * were told earlier. Unfortunately, since Firmware is
8687*4882a593Smuzhiyun * forever, we'll need to keep this code here forever, but in
8688*4882a593Smuzhiyun * later T6 Firmware it should just be an assignment of the
8689*4882a593Smuzhiyun * same value already recorded.
8690*4882a593Smuzhiyun */
8691*4882a593Smuzhiyun pi->port_type = port_type;
8692*4882a593Smuzhiyun
8693*4882a593Smuzhiyun /* Record new Module Type information.
8694*4882a593Smuzhiyun */
8695*4882a593Smuzhiyun pi->mod_type = mod_type;
8696*4882a593Smuzhiyun
8697*4882a593Smuzhiyun /* Let the OS-dependent layer know if we have a new
8698*4882a593Smuzhiyun * Transceiver Module inserted.
8699*4882a593Smuzhiyun */
8700*4882a593Smuzhiyun lc->new_module = t4_is_inserted_mod_type(mod_type);
8701*4882a593Smuzhiyun
8702*4882a593Smuzhiyun t4_os_portmod_changed(adapter, pi->port_id);
8703*4882a593Smuzhiyun }
8704*4882a593Smuzhiyun
8705*4882a593Smuzhiyun if (link_ok != lc->link_ok || speed != lc->speed ||
8706*4882a593Smuzhiyun fc != lc->fc || adv_fc != lc->advertised_fc ||
8707*4882a593Smuzhiyun fec != lc->fec) {
8708*4882a593Smuzhiyun /* something changed */
8709*4882a593Smuzhiyun if (!link_ok && lc->link_ok) {
8710*4882a593Smuzhiyun lc->link_down_rc = linkdnrc;
8711*4882a593Smuzhiyun dev_warn_ratelimited(adapter->pdev_dev,
8712*4882a593Smuzhiyun "Port %d link down, reason: %s\n",
8713*4882a593Smuzhiyun pi->tx_chan,
8714*4882a593Smuzhiyun t4_link_down_rc_str(linkdnrc));
8715*4882a593Smuzhiyun }
8716*4882a593Smuzhiyun lc->link_ok = link_ok;
8717*4882a593Smuzhiyun lc->speed = speed;
8718*4882a593Smuzhiyun lc->advertised_fc = adv_fc;
8719*4882a593Smuzhiyun lc->fc = fc;
8720*4882a593Smuzhiyun lc->fec = fec;
8721*4882a593Smuzhiyun
8722*4882a593Smuzhiyun lc->lpacaps = lpacaps;
8723*4882a593Smuzhiyun lc->acaps = acaps & ADVERT_MASK;
8724*4882a593Smuzhiyun
8725*4882a593Smuzhiyun /* If we're not physically capable of Auto-Negotiation, note
8726*4882a593Smuzhiyun * this as Auto-Negotiation disabled. Otherwise, we track
8727*4882a593Smuzhiyun * what Auto-Negotiation settings we have. Note parallel
8728*4882a593Smuzhiyun * structure in t4_link_l1cfg_core() and init_link_config().
8729*4882a593Smuzhiyun */
8730*4882a593Smuzhiyun if (!(lc->acaps & FW_PORT_CAP32_ANEG)) {
8731*4882a593Smuzhiyun lc->autoneg = AUTONEG_DISABLE;
8732*4882a593Smuzhiyun } else if (lc->acaps & FW_PORT_CAP32_ANEG) {
8733*4882a593Smuzhiyun lc->autoneg = AUTONEG_ENABLE;
8734*4882a593Smuzhiyun } else {
8735*4882a593Smuzhiyun /* When Autoneg is disabled, user needs to set
8736*4882a593Smuzhiyun * single speed.
8737*4882a593Smuzhiyun * Similar to cxgb4_ethtool.c: set_link_ksettings
8738*4882a593Smuzhiyun */
8739*4882a593Smuzhiyun lc->acaps = 0;
8740*4882a593Smuzhiyun lc->speed_caps = fwcap_to_fwspeed(acaps);
8741*4882a593Smuzhiyun lc->autoneg = AUTONEG_DISABLE;
8742*4882a593Smuzhiyun }
8743*4882a593Smuzhiyun
8744*4882a593Smuzhiyun t4_os_link_changed(adapter, pi->port_id, link_ok);
8745*4882a593Smuzhiyun }
8746*4882a593Smuzhiyun
8747*4882a593Smuzhiyun /* If we have a new Transceiver Module and the OS-dependent code has
8748*4882a593Smuzhiyun * told us that it wants us to redo whatever "sticky" L1 Configuration
8749*4882a593Smuzhiyun * Link Parameters are set, do that now.
8750*4882a593Smuzhiyun */
8751*4882a593Smuzhiyun if (lc->new_module && lc->redo_l1cfg) {
8752*4882a593Smuzhiyun struct link_config old_lc;
8753*4882a593Smuzhiyun int ret;
8754*4882a593Smuzhiyun
8755*4882a593Smuzhiyun /* Save the current L1 Configuration and restore it if an
8756*4882a593Smuzhiyun * error occurs. We probably should fix the l1_cfg*()
8757*4882a593Smuzhiyun * routines not to change the link_config when an error
8758*4882a593Smuzhiyun * occurs ...
8759*4882a593Smuzhiyun */
8760*4882a593Smuzhiyun old_lc = *lc;
8761*4882a593Smuzhiyun ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc);
8762*4882a593Smuzhiyun if (ret) {
8763*4882a593Smuzhiyun *lc = old_lc;
8764*4882a593Smuzhiyun dev_warn(adapter->pdev_dev,
8765*4882a593Smuzhiyun "Attempt to update new Transceiver Module settings failed\n");
8766*4882a593Smuzhiyun }
8767*4882a593Smuzhiyun }
8768*4882a593Smuzhiyun lc->new_module = false;
8769*4882a593Smuzhiyun lc->redo_l1cfg = false;
8770*4882a593Smuzhiyun }
8771*4882a593Smuzhiyun
8772*4882a593Smuzhiyun /**
8773*4882a593Smuzhiyun * t4_update_port_info - retrieve and update port information if changed
8774*4882a593Smuzhiyun * @pi: the port_info
8775*4882a593Smuzhiyun *
8776*4882a593Smuzhiyun * We issue a Get Port Information Command to the Firmware and, if
8777*4882a593Smuzhiyun * successful, we check to see if anything is different from what we
8778*4882a593Smuzhiyun * last recorded and update things accordingly.
8779*4882a593Smuzhiyun */
t4_update_port_info(struct port_info * pi)8780*4882a593Smuzhiyun int t4_update_port_info(struct port_info *pi)
8781*4882a593Smuzhiyun {
8782*4882a593Smuzhiyun unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8783*4882a593Smuzhiyun struct fw_port_cmd port_cmd;
8784*4882a593Smuzhiyun int ret;
8785*4882a593Smuzhiyun
8786*4882a593Smuzhiyun memset(&port_cmd, 0, sizeof(port_cmd));
8787*4882a593Smuzhiyun port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8788*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_READ_F |
8789*4882a593Smuzhiyun FW_PORT_CMD_PORTID_V(pi->tx_chan));
8790*4882a593Smuzhiyun port_cmd.action_to_len16 = cpu_to_be32(
8791*4882a593Smuzhiyun FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
8792*4882a593Smuzhiyun ? FW_PORT_ACTION_GET_PORT_INFO
8793*4882a593Smuzhiyun : FW_PORT_ACTION_GET_PORT_INFO32) |
8794*4882a593Smuzhiyun FW_LEN16(port_cmd));
8795*4882a593Smuzhiyun ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8796*4882a593Smuzhiyun &port_cmd, sizeof(port_cmd), &port_cmd);
8797*4882a593Smuzhiyun if (ret)
8798*4882a593Smuzhiyun return ret;
8799*4882a593Smuzhiyun
8800*4882a593Smuzhiyun t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
8801*4882a593Smuzhiyun return 0;
8802*4882a593Smuzhiyun }
8803*4882a593Smuzhiyun
8804*4882a593Smuzhiyun /**
8805*4882a593Smuzhiyun * t4_get_link_params - retrieve basic link parameters for given port
8806*4882a593Smuzhiyun * @pi: the port
8807*4882a593Smuzhiyun * @link_okp: value return pointer for link up/down
8808*4882a593Smuzhiyun * @speedp: value return pointer for speed (Mb/s)
8809*4882a593Smuzhiyun * @mtup: value return pointer for mtu
8810*4882a593Smuzhiyun *
8811*4882a593Smuzhiyun * Retrieves basic link parameters for a port: link up/down, speed (Mb/s),
8812*4882a593Smuzhiyun * and MTU for a specified port. A negative error is returned on
8813*4882a593Smuzhiyun * failure; 0 on success.
8814*4882a593Smuzhiyun */
t4_get_link_params(struct port_info * pi,unsigned int * link_okp,unsigned int * speedp,unsigned int * mtup)8815*4882a593Smuzhiyun int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
8816*4882a593Smuzhiyun unsigned int *speedp, unsigned int *mtup)
8817*4882a593Smuzhiyun {
8818*4882a593Smuzhiyun unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8819*4882a593Smuzhiyun unsigned int action, link_ok, mtu;
8820*4882a593Smuzhiyun struct fw_port_cmd port_cmd;
8821*4882a593Smuzhiyun fw_port_cap32_t linkattr;
8822*4882a593Smuzhiyun int ret;
8823*4882a593Smuzhiyun
8824*4882a593Smuzhiyun memset(&port_cmd, 0, sizeof(port_cmd));
8825*4882a593Smuzhiyun port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8826*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_READ_F |
8827*4882a593Smuzhiyun FW_PORT_CMD_PORTID_V(pi->tx_chan));
8828*4882a593Smuzhiyun action = (fw_caps == FW_CAPS16
8829*4882a593Smuzhiyun ? FW_PORT_ACTION_GET_PORT_INFO
8830*4882a593Smuzhiyun : FW_PORT_ACTION_GET_PORT_INFO32);
8831*4882a593Smuzhiyun port_cmd.action_to_len16 = cpu_to_be32(
8832*4882a593Smuzhiyun FW_PORT_CMD_ACTION_V(action) |
8833*4882a593Smuzhiyun FW_LEN16(port_cmd));
8834*4882a593Smuzhiyun ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8835*4882a593Smuzhiyun &port_cmd, sizeof(port_cmd), &port_cmd);
8836*4882a593Smuzhiyun if (ret)
8837*4882a593Smuzhiyun return ret;
8838*4882a593Smuzhiyun
8839*4882a593Smuzhiyun if (action == FW_PORT_ACTION_GET_PORT_INFO) {
8840*4882a593Smuzhiyun u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype);
8841*4882a593Smuzhiyun
8842*4882a593Smuzhiyun link_ok = !!(lstatus & FW_PORT_CMD_LSTATUS_F);
8843*4882a593Smuzhiyun linkattr = lstatus_to_fwcap(lstatus);
8844*4882a593Smuzhiyun mtu = be16_to_cpu(port_cmd.u.info.mtu);
8845*4882a593Smuzhiyun } else {
8846*4882a593Smuzhiyun u32 lstatus32 =
8847*4882a593Smuzhiyun be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32);
8848*4882a593Smuzhiyun
8849*4882a593Smuzhiyun link_ok = !!(lstatus32 & FW_PORT_CMD_LSTATUS32_F);
8850*4882a593Smuzhiyun linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32);
8851*4882a593Smuzhiyun mtu = FW_PORT_CMD_MTU32_G(
8852*4882a593Smuzhiyun be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
8853*4882a593Smuzhiyun }
8854*4882a593Smuzhiyun
8855*4882a593Smuzhiyun if (link_okp)
8856*4882a593Smuzhiyun *link_okp = link_ok;
8857*4882a593Smuzhiyun if (speedp)
8858*4882a593Smuzhiyun *speedp = fwcap_to_speed(linkattr);
8859*4882a593Smuzhiyun if (mtup)
8860*4882a593Smuzhiyun *mtup = mtu;
8861*4882a593Smuzhiyun
8862*4882a593Smuzhiyun return 0;
8863*4882a593Smuzhiyun }
8864*4882a593Smuzhiyun
8865*4882a593Smuzhiyun /**
8866*4882a593Smuzhiyun * t4_handle_fw_rpl - process a FW reply message
8867*4882a593Smuzhiyun * @adap: the adapter
8868*4882a593Smuzhiyun * @rpl: start of the FW message
8869*4882a593Smuzhiyun *
8870*4882a593Smuzhiyun * Processes a FW message, such as link state change messages.
8871*4882a593Smuzhiyun */
t4_handle_fw_rpl(struct adapter * adap,const __be64 * rpl)8872*4882a593Smuzhiyun int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
8873*4882a593Smuzhiyun {
8874*4882a593Smuzhiyun u8 opcode = *(const u8 *)rpl;
8875*4882a593Smuzhiyun
8876*4882a593Smuzhiyun /* This might be a port command ... this simplifies the following
8877*4882a593Smuzhiyun * conditionals ... We can get away with pre-dereferencing
8878*4882a593Smuzhiyun * action_to_len16 because it's in the first 16 bytes and all messages
8879*4882a593Smuzhiyun * will be at least that long.
8880*4882a593Smuzhiyun */
8881*4882a593Smuzhiyun const struct fw_port_cmd *p = (const void *)rpl;
8882*4882a593Smuzhiyun unsigned int action =
8883*4882a593Smuzhiyun FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
8884*4882a593Smuzhiyun
8885*4882a593Smuzhiyun if (opcode == FW_PORT_CMD &&
8886*4882a593Smuzhiyun (action == FW_PORT_ACTION_GET_PORT_INFO ||
8887*4882a593Smuzhiyun action == FW_PORT_ACTION_GET_PORT_INFO32)) {
8888*4882a593Smuzhiyun int i;
8889*4882a593Smuzhiyun int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
8890*4882a593Smuzhiyun struct port_info *pi = NULL;
8891*4882a593Smuzhiyun
8892*4882a593Smuzhiyun for_each_port(adap, i) {
8893*4882a593Smuzhiyun pi = adap2pinfo(adap, i);
8894*4882a593Smuzhiyun if (pi->tx_chan == chan)
8895*4882a593Smuzhiyun break;
8896*4882a593Smuzhiyun }
8897*4882a593Smuzhiyun
8898*4882a593Smuzhiyun t4_handle_get_port_info(pi, rpl);
8899*4882a593Smuzhiyun } else {
8900*4882a593Smuzhiyun dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n",
8901*4882a593Smuzhiyun opcode);
8902*4882a593Smuzhiyun return -EINVAL;
8903*4882a593Smuzhiyun }
8904*4882a593Smuzhiyun return 0;
8905*4882a593Smuzhiyun }
8906*4882a593Smuzhiyun
get_pci_mode(struct adapter * adapter,struct pci_params * p)8907*4882a593Smuzhiyun static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
8908*4882a593Smuzhiyun {
8909*4882a593Smuzhiyun u16 val;
8910*4882a593Smuzhiyun
8911*4882a593Smuzhiyun if (pci_is_pcie(adapter->pdev)) {
8912*4882a593Smuzhiyun pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
8913*4882a593Smuzhiyun p->speed = val & PCI_EXP_LNKSTA_CLS;
8914*4882a593Smuzhiyun p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
8915*4882a593Smuzhiyun }
8916*4882a593Smuzhiyun }
8917*4882a593Smuzhiyun
8918*4882a593Smuzhiyun /**
8919*4882a593Smuzhiyun * init_link_config - initialize a link's SW state
8920*4882a593Smuzhiyun * @lc: pointer to structure holding the link state
8921*4882a593Smuzhiyun * @pcaps: link Port Capabilities
8922*4882a593Smuzhiyun * @acaps: link current Advertised Port Capabilities
8923*4882a593Smuzhiyun *
8924*4882a593Smuzhiyun * Initializes the SW state maintained for each link, including the link's
8925*4882a593Smuzhiyun * capabilities and default speed/flow-control/autonegotiation settings.
8926*4882a593Smuzhiyun */
init_link_config(struct link_config * lc,fw_port_cap32_t pcaps,fw_port_cap32_t acaps)8927*4882a593Smuzhiyun static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
8928*4882a593Smuzhiyun fw_port_cap32_t acaps)
8929*4882a593Smuzhiyun {
8930*4882a593Smuzhiyun lc->pcaps = pcaps;
8931*4882a593Smuzhiyun lc->def_acaps = acaps;
8932*4882a593Smuzhiyun lc->lpacaps = 0;
8933*4882a593Smuzhiyun lc->speed_caps = 0;
8934*4882a593Smuzhiyun lc->speed = 0;
8935*4882a593Smuzhiyun lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
8936*4882a593Smuzhiyun
8937*4882a593Smuzhiyun /* For Forward Error Control, we default to whatever the Firmware
8938*4882a593Smuzhiyun * tells us the Link is currently advertising.
8939*4882a593Smuzhiyun */
8940*4882a593Smuzhiyun lc->requested_fec = FEC_AUTO;
8941*4882a593Smuzhiyun lc->fec = fwcap_to_cc_fec(lc->def_acaps);
8942*4882a593Smuzhiyun
8943*4882a593Smuzhiyun /* If the Port is capable of Auto-Negtotiation, initialize it as
8944*4882a593Smuzhiyun * "enabled" and copy over all of the Physical Port Capabilities
8945*4882a593Smuzhiyun * to the Advertised Port Capabilities. Otherwise mark it as
8946*4882a593Smuzhiyun * Auto-Negotiate disabled and select the highest supported speed
8947*4882a593Smuzhiyun * for the link. Note parallel structure in t4_link_l1cfg_core()
8948*4882a593Smuzhiyun * and t4_handle_get_port_info().
8949*4882a593Smuzhiyun */
8950*4882a593Smuzhiyun if (lc->pcaps & FW_PORT_CAP32_ANEG) {
8951*4882a593Smuzhiyun lc->acaps = lc->pcaps & ADVERT_MASK;
8952*4882a593Smuzhiyun lc->autoneg = AUTONEG_ENABLE;
8953*4882a593Smuzhiyun lc->requested_fc |= PAUSE_AUTONEG;
8954*4882a593Smuzhiyun } else {
8955*4882a593Smuzhiyun lc->acaps = 0;
8956*4882a593Smuzhiyun lc->autoneg = AUTONEG_DISABLE;
8957*4882a593Smuzhiyun lc->speed_caps = fwcap_to_fwspeed(acaps);
8958*4882a593Smuzhiyun }
8959*4882a593Smuzhiyun }
8960*4882a593Smuzhiyun
8961*4882a593Smuzhiyun #define CIM_PF_NOACCESS 0xeeeeeeee
8962*4882a593Smuzhiyun
t4_wait_dev_ready(void __iomem * regs)8963*4882a593Smuzhiyun int t4_wait_dev_ready(void __iomem *regs)
8964*4882a593Smuzhiyun {
8965*4882a593Smuzhiyun u32 whoami;
8966*4882a593Smuzhiyun
8967*4882a593Smuzhiyun whoami = readl(regs + PL_WHOAMI_A);
8968*4882a593Smuzhiyun if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
8969*4882a593Smuzhiyun return 0;
8970*4882a593Smuzhiyun
8971*4882a593Smuzhiyun msleep(500);
8972*4882a593Smuzhiyun whoami = readl(regs + PL_WHOAMI_A);
8973*4882a593Smuzhiyun return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
8974*4882a593Smuzhiyun }
8975*4882a593Smuzhiyun
/* Identity and size of a serial Flash part; used by t4_get_flash_params()
 * to recognize parts that the generic JEDEC-ID decoding below doesn't
 * cover.
 */
struct flash_desc {
	u32 vendor_and_model_id;	/* raw JEDEC Read-ID value */
	u32 size_mb;			/* NOTE(review): despite the name,
					 * this holds the part size in
					 * *bytes* (e.g. 4 << 20) -- see
					 * its assignment to params.sf_size
					 * in t4_get_flash_params().
					 */
};
8980*4882a593Smuzhiyun
t4_get_flash_params(struct adapter * adap)8981*4882a593Smuzhiyun static int t4_get_flash_params(struct adapter *adap)
8982*4882a593Smuzhiyun {
8983*4882a593Smuzhiyun /* Table for non-Numonix supported flash parts. Numonix parts are left
8984*4882a593Smuzhiyun * to the preexisting code. All flash parts have 64KB sectors.
8985*4882a593Smuzhiyun */
8986*4882a593Smuzhiyun static struct flash_desc supported_flash[] = {
8987*4882a593Smuzhiyun { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
8988*4882a593Smuzhiyun };
8989*4882a593Smuzhiyun
8990*4882a593Smuzhiyun unsigned int part, manufacturer;
8991*4882a593Smuzhiyun unsigned int density, size = 0;
8992*4882a593Smuzhiyun u32 flashid = 0;
8993*4882a593Smuzhiyun int ret;
8994*4882a593Smuzhiyun
8995*4882a593Smuzhiyun /* Issue a Read ID Command to the Flash part. We decode supported
8996*4882a593Smuzhiyun * Flash parts and their sizes from this. There's a newer Query
8997*4882a593Smuzhiyun * Command which can retrieve detailed geometry information but many
8998*4882a593Smuzhiyun * Flash parts don't support it.
8999*4882a593Smuzhiyun */
9000*4882a593Smuzhiyun
9001*4882a593Smuzhiyun ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
9002*4882a593Smuzhiyun if (!ret)
9003*4882a593Smuzhiyun ret = sf1_read(adap, 3, 0, 1, &flashid);
9004*4882a593Smuzhiyun t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
9005*4882a593Smuzhiyun if (ret)
9006*4882a593Smuzhiyun return ret;
9007*4882a593Smuzhiyun
9008*4882a593Smuzhiyun /* Check to see if it's one of our non-standard supported Flash parts.
9009*4882a593Smuzhiyun */
9010*4882a593Smuzhiyun for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
9011*4882a593Smuzhiyun if (supported_flash[part].vendor_and_model_id == flashid) {
9012*4882a593Smuzhiyun adap->params.sf_size = supported_flash[part].size_mb;
9013*4882a593Smuzhiyun adap->params.sf_nsec =
9014*4882a593Smuzhiyun adap->params.sf_size / SF_SEC_SIZE;
9015*4882a593Smuzhiyun goto found;
9016*4882a593Smuzhiyun }
9017*4882a593Smuzhiyun
9018*4882a593Smuzhiyun /* Decode Flash part size. The code below looks repetitive with
9019*4882a593Smuzhiyun * common encodings, but that's not guaranteed in the JEDEC
9020*4882a593Smuzhiyun * specification for the Read JEDEC ID command. The only thing that
9021*4882a593Smuzhiyun * we're guaranteed by the JEDEC specification is where the
9022*4882a593Smuzhiyun * Manufacturer ID is in the returned result. After that each
9023*4882a593Smuzhiyun * Manufacturer ~could~ encode things completely differently.
9024*4882a593Smuzhiyun * Note, all Flash parts must have 64KB sectors.
9025*4882a593Smuzhiyun */
9026*4882a593Smuzhiyun manufacturer = flashid & 0xff;
9027*4882a593Smuzhiyun switch (manufacturer) {
9028*4882a593Smuzhiyun case 0x20: { /* Micron/Numonix */
9029*4882a593Smuzhiyun /* This Density -> Size decoding table is taken from Micron
9030*4882a593Smuzhiyun * Data Sheets.
9031*4882a593Smuzhiyun */
9032*4882a593Smuzhiyun density = (flashid >> 16) & 0xff;
9033*4882a593Smuzhiyun switch (density) {
9034*4882a593Smuzhiyun case 0x14: /* 1MB */
9035*4882a593Smuzhiyun size = 1 << 20;
9036*4882a593Smuzhiyun break;
9037*4882a593Smuzhiyun case 0x15: /* 2MB */
9038*4882a593Smuzhiyun size = 1 << 21;
9039*4882a593Smuzhiyun break;
9040*4882a593Smuzhiyun case 0x16: /* 4MB */
9041*4882a593Smuzhiyun size = 1 << 22;
9042*4882a593Smuzhiyun break;
9043*4882a593Smuzhiyun case 0x17: /* 8MB */
9044*4882a593Smuzhiyun size = 1 << 23;
9045*4882a593Smuzhiyun break;
9046*4882a593Smuzhiyun case 0x18: /* 16MB */
9047*4882a593Smuzhiyun size = 1 << 24;
9048*4882a593Smuzhiyun break;
9049*4882a593Smuzhiyun case 0x19: /* 32MB */
9050*4882a593Smuzhiyun size = 1 << 25;
9051*4882a593Smuzhiyun break;
9052*4882a593Smuzhiyun case 0x20: /* 64MB */
9053*4882a593Smuzhiyun size = 1 << 26;
9054*4882a593Smuzhiyun break;
9055*4882a593Smuzhiyun case 0x21: /* 128MB */
9056*4882a593Smuzhiyun size = 1 << 27;
9057*4882a593Smuzhiyun break;
9058*4882a593Smuzhiyun case 0x22: /* 256MB */
9059*4882a593Smuzhiyun size = 1 << 28;
9060*4882a593Smuzhiyun break;
9061*4882a593Smuzhiyun }
9062*4882a593Smuzhiyun break;
9063*4882a593Smuzhiyun }
9064*4882a593Smuzhiyun case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
9065*4882a593Smuzhiyun /* This Density -> Size decoding table is taken from ISSI
9066*4882a593Smuzhiyun * Data Sheets.
9067*4882a593Smuzhiyun */
9068*4882a593Smuzhiyun density = (flashid >> 16) & 0xff;
9069*4882a593Smuzhiyun switch (density) {
9070*4882a593Smuzhiyun case 0x16: /* 32 MB */
9071*4882a593Smuzhiyun size = 1 << 25;
9072*4882a593Smuzhiyun break;
9073*4882a593Smuzhiyun case 0x17: /* 64MB */
9074*4882a593Smuzhiyun size = 1 << 26;
9075*4882a593Smuzhiyun break;
9076*4882a593Smuzhiyun }
9077*4882a593Smuzhiyun break;
9078*4882a593Smuzhiyun }
9079*4882a593Smuzhiyun case 0xc2: { /* Macronix */
9080*4882a593Smuzhiyun /* This Density -> Size decoding table is taken from Macronix
9081*4882a593Smuzhiyun * Data Sheets.
9082*4882a593Smuzhiyun */
9083*4882a593Smuzhiyun density = (flashid >> 16) & 0xff;
9084*4882a593Smuzhiyun switch (density) {
9085*4882a593Smuzhiyun case 0x17: /* 8MB */
9086*4882a593Smuzhiyun size = 1 << 23;
9087*4882a593Smuzhiyun break;
9088*4882a593Smuzhiyun case 0x18: /* 16MB */
9089*4882a593Smuzhiyun size = 1 << 24;
9090*4882a593Smuzhiyun break;
9091*4882a593Smuzhiyun }
9092*4882a593Smuzhiyun break;
9093*4882a593Smuzhiyun }
9094*4882a593Smuzhiyun case 0xef: { /* Winbond */
9095*4882a593Smuzhiyun /* This Density -> Size decoding table is taken from Winbond
9096*4882a593Smuzhiyun * Data Sheets.
9097*4882a593Smuzhiyun */
9098*4882a593Smuzhiyun density = (flashid >> 16) & 0xff;
9099*4882a593Smuzhiyun switch (density) {
9100*4882a593Smuzhiyun case 0x17: /* 8MB */
9101*4882a593Smuzhiyun size = 1 << 23;
9102*4882a593Smuzhiyun break;
9103*4882a593Smuzhiyun case 0x18: /* 16MB */
9104*4882a593Smuzhiyun size = 1 << 24;
9105*4882a593Smuzhiyun break;
9106*4882a593Smuzhiyun }
9107*4882a593Smuzhiyun break;
9108*4882a593Smuzhiyun }
9109*4882a593Smuzhiyun }
9110*4882a593Smuzhiyun
9111*4882a593Smuzhiyun /* If we didn't recognize the FLASH part, that's no real issue: the
9112*4882a593Smuzhiyun * Hardware/Software contract says that Hardware will _*ALWAYS*_
9113*4882a593Smuzhiyun * use a FLASH part which is at least 4MB in size and has 64KB
9114*4882a593Smuzhiyun * sectors. The unrecognized FLASH part is likely to be much larger
9115*4882a593Smuzhiyun * than 4MB, but that's all we really need.
9116*4882a593Smuzhiyun */
9117*4882a593Smuzhiyun if (size == 0) {
9118*4882a593Smuzhiyun dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
9119*4882a593Smuzhiyun flashid);
9120*4882a593Smuzhiyun size = 1 << 22;
9121*4882a593Smuzhiyun }
9122*4882a593Smuzhiyun
9123*4882a593Smuzhiyun /* Store decoded Flash size and fall through into vetting code. */
9124*4882a593Smuzhiyun adap->params.sf_size = size;
9125*4882a593Smuzhiyun adap->params.sf_nsec = size / SF_SEC_SIZE;
9126*4882a593Smuzhiyun
9127*4882a593Smuzhiyun found:
9128*4882a593Smuzhiyun if (adap->params.sf_size < FLASH_MIN_SIZE)
9129*4882a593Smuzhiyun dev_warn(adap->pdev_dev, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
9130*4882a593Smuzhiyun flashid, adap->params.sf_size, FLASH_MIN_SIZE);
9131*4882a593Smuzhiyun return 0;
9132*4882a593Smuzhiyun }
9133*4882a593Smuzhiyun
/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, take PHYs out of reset, and
 *	initialize the MDIO interface.
 *
 *	Returns 0 on success, a negative errno if the Flash can't be
 *	identified or the PCI Device ID encodes an unsupported chip version.
 */
int t4_prep_adapter(struct adapter *adapter)
{
	int ret, ver;
	uint16_t device_id;
	u32 pl_rev;

	/* Record negotiated PCIe link speed/width and the chip revision. */
	get_pci_mode(adapter, &adapter->params.pci);
	pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));

	/* Identify the serial Flash part and record its geometry. */
	ret = t4_get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
		return ret;
	}

	/* Retrieve adapter's device ID; the chip version (T4/T5/T6) is
	 * encoded in its top 4 bits.
	 */
	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
	ver = device_id >> 12;
	adapter->params.chip = 0;
	switch (ver) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
		adapter->params.arch.sge_fl_db = DBPRIO_F;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		/* Congestion map is for 4 channels so that
		 * MPS can have 4 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 2;
		break;
	case CHELSIO_T5:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		adapter->params.arch.cng_ch_bits_log = 2;
		break;
	case CHELSIO_T6:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 256;
		adapter->params.arch.nchan = 2;
		adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
		adapter->params.arch.vfcount = 256;
		/* Congestion map will be for 2 channels so that
		 * MPS can have 8 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 3;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			device_id);
		return -EINVAL;
	}

	adapter->params.cim_la_size = CIMLA_SIZE;
	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set PCIe completion timeout to 4 seconds. */
	pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
	return 0;
}
9223*4882a593Smuzhiyun
9224*4882a593Smuzhiyun /**
9225*4882a593Smuzhiyun * t4_shutdown_adapter - shut down adapter, host & wire
9226*4882a593Smuzhiyun * @adapter: the adapter
9227*4882a593Smuzhiyun *
9228*4882a593Smuzhiyun * Perform an emergency shutdown of the adapter and stop it from
9229*4882a593Smuzhiyun * continuing any further communication on the ports or DMA to the
9230*4882a593Smuzhiyun * host. This is typically used when the adapter and/or firmware
9231*4882a593Smuzhiyun * have crashed and we want to prevent any further accidental
9232*4882a593Smuzhiyun * communication with the rest of the world. This will also force
9233*4882a593Smuzhiyun * the port Link Status to go down -- if register writes work --
9234*4882a593Smuzhiyun * which should help our peers figure out that we're down.
9235*4882a593Smuzhiyun */
t4_shutdown_adapter(struct adapter * adapter)9236*4882a593Smuzhiyun int t4_shutdown_adapter(struct adapter *adapter)
9237*4882a593Smuzhiyun {
9238*4882a593Smuzhiyun int port;
9239*4882a593Smuzhiyun
9240*4882a593Smuzhiyun t4_intr_disable(adapter);
9241*4882a593Smuzhiyun t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
9242*4882a593Smuzhiyun for_each_port(adapter, port) {
9243*4882a593Smuzhiyun u32 a_port_cfg = is_t4(adapter->params.chip) ?
9244*4882a593Smuzhiyun PORT_REG(port, XGMAC_PORT_CFG_A) :
9245*4882a593Smuzhiyun T5_PORT_REG(port, MAC_PORT_CFG_A);
9246*4882a593Smuzhiyun
9247*4882a593Smuzhiyun t4_write_reg(adapter, a_port_cfg,
9248*4882a593Smuzhiyun t4_read_reg(adapter, a_port_cfg)
9249*4882a593Smuzhiyun & ~SIGNAL_DET_V(1));
9250*4882a593Smuzhiyun }
9251*4882a593Smuzhiyun t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
9252*4882a593Smuzhiyun
9253*4882a593Smuzhiyun return 0;
9254*4882a593Smuzhiyun }
9255*4882a593Smuzhiyun
9256*4882a593Smuzhiyun /**
9257*4882a593Smuzhiyun * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
9258*4882a593Smuzhiyun * @adapter: the adapter
9259*4882a593Smuzhiyun * @qid: the Queue ID
9260*4882a593Smuzhiyun * @qtype: the Ingress or Egress type for @qid
9261*4882a593Smuzhiyun * @user: true if this request is for a user mode queue
9262*4882a593Smuzhiyun * @pbar2_qoffset: BAR2 Queue Offset
9263*4882a593Smuzhiyun * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
9264*4882a593Smuzhiyun *
9265*4882a593Smuzhiyun * Returns the BAR2 SGE Queue Registers information associated with the
9266*4882a593Smuzhiyun * indicated Absolute Queue ID. These are passed back in return value
9267*4882a593Smuzhiyun * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
9268*4882a593Smuzhiyun * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
9269*4882a593Smuzhiyun *
9270*4882a593Smuzhiyun * This may return an error which indicates that BAR2 SGE Queue
9271*4882a593Smuzhiyun * registers aren't available. If an error is not returned, then the
9272*4882a593Smuzhiyun * following values are returned:
9273*4882a593Smuzhiyun *
9274*4882a593Smuzhiyun * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
9275*4882a593Smuzhiyun * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
9276*4882a593Smuzhiyun *
9277*4882a593Smuzhiyun * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
9278*4882a593Smuzhiyun * require the "Inferred Queue ID" ability may be used. E.g. the
9279*4882a593Smuzhiyun * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
9280*4882a593Smuzhiyun * then these "Inferred Queue ID" register may not be used.
9281*4882a593Smuzhiyun */
int t4_bar2_sge_qregs(struct adapter *adapter,
		      unsigned int qid,
		      enum t4_bar2_qtype qtype,
		      int user,
		      u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid)
{
	unsigned int hpage_shift, hpage_size, queues_shift;
	unsigned int rel_qid, rel_qid_offset;
	u64 qoffset;

	/* T4 hardware has no BAR2 SGE Queue registers for kernel mode
	 * queues, so reject such requests outright.
	 */
	if (!user && is_t4(adapter->params.chip))
		return -EINVAL;

	/* SGE Host Page Size for this function, expressed as a shift. */
	hpage_shift = adapter->params.sge.hps + 10;
	hpage_size = 1 << hpage_shift;

	/* Queues-per-page depends on whether this is an Egress or an
	 * Ingress queue.
	 */
	queues_shift = (qtype == T4_BAR2_QTYPE_EGRESS
			? adapter->params.sge.eq_qpp
			: adapter->params.sge.iq_qpp);

	/* Locate the BAR2 page holding this queue's register area, the
	 * queue's ID relative to that page, and its byte offset within
	 * the page.
	 */
	qoffset = (u64)(qid >> queues_shift) << hpage_shift;
	rel_qid = qid & ((1 << queues_shift) - 1);
	rel_qid_offset = rel_qid * SGE_UDB_SIZE;

	/* If the queue's register area lies within the BAR2 page, the
	 * hardware can infer the Absolute Queue ID from the write address
	 * alone, and the caller must use a BAR2 Queue ID of 0.  One
	 * important consequence: registers which synthesize the Queue ID
	 * purely from writes (e.g. the Write Combined Doorbell Buffer)
	 * only work for such "inferred" areas.  Otherwise we point at the
	 * first SGE Queue Area of the page and return the relative Queue
	 * ID for use in the registers' "Queue ID" fields.
	 */
	if (rel_qid_offset < hpage_size) {
		qoffset += rel_qid_offset;
		rel_qid = 0;
	}

	*pbar2_qoffset = qoffset;
	*pbar2_qid = rel_qid;
	return 0;
}
9345*4882a593Smuzhiyun
/**
 *	t4_init_devlog_params - initialize adapter->params.devlog
 *	@adap: the adapter
 *
 *	Initialize various fields of the adapter's Firmware Device Log
 *	Parameters structure.  Returns 0 on success, negative errno if the
 *	mailbox query fails.
 */
int t4_init_devlog_params(struct adapter *adap)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
		t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		/* Start address is encoded in 16-byte units. */
		dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
		dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;

		/* Size is encoded as ((number of entries) / 128) - 1. */
		nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
	devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
					     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	/* Decode the firmware's reply: memory type, 16-byte-unit start
	 * address, and size in bytes.
	 */
	devlog_meminfo =
		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
	dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}
9399*4882a593Smuzhiyun
9400*4882a593Smuzhiyun /**
9401*4882a593Smuzhiyun * t4_init_sge_params - initialize adap->params.sge
9402*4882a593Smuzhiyun * @adapter: the adapter
9403*4882a593Smuzhiyun *
9404*4882a593Smuzhiyun * Initialize various fields of the adapter's SGE Parameters structure.
9405*4882a593Smuzhiyun */
t4_init_sge_params(struct adapter * adapter)9406*4882a593Smuzhiyun int t4_init_sge_params(struct adapter *adapter)
9407*4882a593Smuzhiyun {
9408*4882a593Smuzhiyun struct sge_params *sge_params = &adapter->params.sge;
9409*4882a593Smuzhiyun u32 hps, qpp;
9410*4882a593Smuzhiyun unsigned int s_hps, s_qpp;
9411*4882a593Smuzhiyun
9412*4882a593Smuzhiyun /* Extract the SGE Page Size for our PF.
9413*4882a593Smuzhiyun */
9414*4882a593Smuzhiyun hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
9415*4882a593Smuzhiyun s_hps = (HOSTPAGESIZEPF0_S +
9416*4882a593Smuzhiyun (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
9417*4882a593Smuzhiyun sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
9418*4882a593Smuzhiyun
9419*4882a593Smuzhiyun /* Extract the SGE Egress and Ingess Queues Per Page for our PF.
9420*4882a593Smuzhiyun */
9421*4882a593Smuzhiyun s_qpp = (QUEUESPERPAGEPF0_S +
9422*4882a593Smuzhiyun (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
9423*4882a593Smuzhiyun qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
9424*4882a593Smuzhiyun sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
9425*4882a593Smuzhiyun qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
9426*4882a593Smuzhiyun sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
9427*4882a593Smuzhiyun
9428*4882a593Smuzhiyun return 0;
9429*4882a593Smuzhiyun }
9430*4882a593Smuzhiyun
/**
 *	t4_init_tp_params - initialize adap->params.tp
 *	@adap: the adapter
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
{
	u32 param, val, v;
	int chan, ret;

	/* Cache the TP Timer and Delayed ACK resolution fields. */
	v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
	adap->params.tp.tre = TIMERRESOLUTION_G(v);
	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/* Cache the adapter's Compressed Filter Mode/Mask and global Ingress
	 * Configuration.
	 */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FILTER) |
		 FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_FILTER_MODE_MASK));

	/* Read current value */
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      &param, &val);
	if (ret == 0) {
		/* Newer firmware exposes mode and mask in one parameter. */
		dev_info(adap->pdev_dev,
			 "Current filter mode/mask 0x%x:0x%x\n",
			 FW_PARAMS_PARAM_FILTER_MODE_G(val),
			 FW_PARAMS_PARAM_FILTER_MASK_G(val));
		adap->params.tp.vlan_pri_map =
			FW_PARAMS_PARAM_FILTER_MODE_G(val);
		adap->params.tp.filter_mask =
			FW_PARAMS_PARAM_FILTER_MASK_G(val);
	} else {
		dev_info(adap->pdev_dev,
			 "Failed to read filter mode/mask via fw api, using indirect-reg-read\n");

		/* In case of older-fw (which doesn't expose the api
		 * FW_PARAM_DEV_FILTER_MODE_MASK) and newer-driver (which uses
		 * the fw api) combination, fall back to the older method of
		 * reading the filter mode from the indirect register.
		 */
		t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
			       TP_VLAN_PRI_MAP_A, sleep_ok);

		/* With the older-fw and newer-driver combination we might run
		 * into an issue when user wants to use hash filter region but
		 * the filter_mask is zero, in this case filter_mask validation
		 * is tough. To avoid that we set the filter_mask same as filter
		 * mode, which will behave exactly as the older way of ignoring
		 * the filter mask validation.
		 */
		adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
	}

	t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
		       TP_INGRESS_CONFIG_A, sleep_ok);

	/* For T6, cache the adapter's compressed error vector
	 * and passing outer header info for encapsulated packets.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
		v = t4_read_reg(adap, TP_OUT_CONFIG_A);
		adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
	}

	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, FCOE_F);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
	adap->params.tp.tos_shift = t4_filter_field_shift(adap, TOS_F);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
							       PROTOCOL_F);
	adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
								ETHERTYPE_F);
	adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
							       MACMATCH_F);
	adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
								MPSHITTYPE_F);
	adap->params.tp.frag_shift = t4_filter_field_shift(adap,
							   FRAGMENTATION_F);

	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((adap->params.tp.ingress_config & VNIC_F) == 0)
		adap->params.tp.vnic_shift = -1;

	/* Cache the 64-bit hash filter mask from the two LE hash-mask
	 * registers.  NOTE(review): the registers are T6-named, so the value
	 * is presumably only meaningful on hash-filter-capable chips --
	 * confirm against the hash filter code paths.
	 */
	v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
	adap->params.tp.hash_filter_mask = v;
	v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
	adap->params.tp.hash_filter_mask |= ((u64)v << 32);
	return 0;
}
9536*4882a593Smuzhiyun
9537*4882a593Smuzhiyun /**
9538*4882a593Smuzhiyun * t4_filter_field_shift - calculate filter field shift
9539*4882a593Smuzhiyun * @adap: the adapter
9540*4882a593Smuzhiyun * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
9541*4882a593Smuzhiyun *
9542*4882a593Smuzhiyun * Return the shift position of a filter field within the Compressed
9543*4882a593Smuzhiyun * Filter Tuple. The filter field is specified via its selection bit
9544*4882a593Smuzhiyun * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
9545*4882a593Smuzhiyun */
t4_filter_field_shift(const struct adapter * adap,int filter_sel)9546*4882a593Smuzhiyun int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
9547*4882a593Smuzhiyun {
9548*4882a593Smuzhiyun unsigned int filter_mode = adap->params.tp.vlan_pri_map;
9549*4882a593Smuzhiyun unsigned int sel;
9550*4882a593Smuzhiyun int field_shift;
9551*4882a593Smuzhiyun
9552*4882a593Smuzhiyun if ((filter_mode & filter_sel) == 0)
9553*4882a593Smuzhiyun return -1;
9554*4882a593Smuzhiyun
9555*4882a593Smuzhiyun for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
9556*4882a593Smuzhiyun switch (filter_mode & sel) {
9557*4882a593Smuzhiyun case FCOE_F:
9558*4882a593Smuzhiyun field_shift += FT_FCOE_W;
9559*4882a593Smuzhiyun break;
9560*4882a593Smuzhiyun case PORT_F:
9561*4882a593Smuzhiyun field_shift += FT_PORT_W;
9562*4882a593Smuzhiyun break;
9563*4882a593Smuzhiyun case VNIC_ID_F:
9564*4882a593Smuzhiyun field_shift += FT_VNIC_ID_W;
9565*4882a593Smuzhiyun break;
9566*4882a593Smuzhiyun case VLAN_F:
9567*4882a593Smuzhiyun field_shift += FT_VLAN_W;
9568*4882a593Smuzhiyun break;
9569*4882a593Smuzhiyun case TOS_F:
9570*4882a593Smuzhiyun field_shift += FT_TOS_W;
9571*4882a593Smuzhiyun break;
9572*4882a593Smuzhiyun case PROTOCOL_F:
9573*4882a593Smuzhiyun field_shift += FT_PROTOCOL_W;
9574*4882a593Smuzhiyun break;
9575*4882a593Smuzhiyun case ETHERTYPE_F:
9576*4882a593Smuzhiyun field_shift += FT_ETHERTYPE_W;
9577*4882a593Smuzhiyun break;
9578*4882a593Smuzhiyun case MACMATCH_F:
9579*4882a593Smuzhiyun field_shift += FT_MACMATCH_W;
9580*4882a593Smuzhiyun break;
9581*4882a593Smuzhiyun case MPSHITTYPE_F:
9582*4882a593Smuzhiyun field_shift += FT_MPSHITTYPE_W;
9583*4882a593Smuzhiyun break;
9584*4882a593Smuzhiyun case FRAGMENTATION_F:
9585*4882a593Smuzhiyun field_shift += FT_FRAGMENTATION_W;
9586*4882a593Smuzhiyun break;
9587*4882a593Smuzhiyun }
9588*4882a593Smuzhiyun }
9589*4882a593Smuzhiyun return field_shift;
9590*4882a593Smuzhiyun }
9591*4882a593Smuzhiyun
t4_init_rss_mode(struct adapter * adap,int mbox)9592*4882a593Smuzhiyun int t4_init_rss_mode(struct adapter *adap, int mbox)
9593*4882a593Smuzhiyun {
9594*4882a593Smuzhiyun int i, ret;
9595*4882a593Smuzhiyun struct fw_rss_vi_config_cmd rvc;
9596*4882a593Smuzhiyun
9597*4882a593Smuzhiyun memset(&rvc, 0, sizeof(rvc));
9598*4882a593Smuzhiyun
9599*4882a593Smuzhiyun for_each_port(adap, i) {
9600*4882a593Smuzhiyun struct port_info *p = adap2pinfo(adap, i);
9601*4882a593Smuzhiyun
9602*4882a593Smuzhiyun rvc.op_to_viid =
9603*4882a593Smuzhiyun cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
9604*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_READ_F |
9605*4882a593Smuzhiyun FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
9606*4882a593Smuzhiyun rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
9607*4882a593Smuzhiyun ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
9608*4882a593Smuzhiyun if (ret)
9609*4882a593Smuzhiyun return ret;
9610*4882a593Smuzhiyun p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
9611*4882a593Smuzhiyun }
9612*4882a593Smuzhiyun return 0;
9613*4882a593Smuzhiyun }
9614*4882a593Smuzhiyun
/**
 *	t4_init_portinfo - allocate a virtual interface and initialize port_info
 *	@pi: the port_info
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@mac: the MAC address of the VI
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC address of the VI as assigned by FW.
 *	@mac should be large enough to hold an Ethernet address.
 *	Returns < 0 on error.
 */
int t4_init_portinfo(struct port_info *pi, int mbox,
		     int port, int pf, int vf, u8 mac[])
{
	struct adapter *adapter = pi->adapter;
	unsigned int fw_caps = adapter->params.fw_caps_support;
	struct fw_port_cmd cmd;
	unsigned int rss_size;
	enum fw_port_type port_type;
	int mdio_addr;
	fw_port_cap32_t pcaps, acaps;
	u8 vivld = 0, vin = 0;
	int ret;

	/* If we haven't yet determined whether we're talking to Firmware
	 * which knows the new 32-bit Port Capabilities, it's time to find
	 * out now.  This will also tell new Firmware to send us Port Status
	 * Updates using the new 32-bit Port Capabilities version of the
	 * Port Information message.
	 */
	if (fw_caps == FW_CAPS_UNKNOWN) {
		u32 param, val;

		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
		val = 1;
		ret = t4_set_params(adapter, mbox, pf, vf, 1, &param, &val);
		/* Success means the Firmware understands the 32-bit format;
		 * cache the answer so we only probe once per adapter.
		 */
		fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
		adapter->params.fw_caps_support = fw_caps;
	}

	/* Issue a GET_PORT_INFO command in whichever capability format the
	 * Firmware supports.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_READ_F |
				       FW_PORT_CMD_PORTID_V(port));
	cmd.action_to_len16 = cpu_to_be32(
		FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
				     ? FW_PORT_ACTION_GET_PORT_INFO
				     : FW_PORT_ACTION_GET_PORT_INFO32) |
		FW_LEN16(cmd));
	ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
	if (ret)
		return ret;

	/* Extract the various fields from the Port Information message.
	 */
	if (fw_caps == FW_CAPS16) {
		u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);

		port_type = FW_PORT_CMD_PTYPE_G(lstatus);
		/* The MDIO address is only valid when MDIOCAP is set. */
		mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F)
			     ? FW_PORT_CMD_MDIOADDR_G(lstatus)
			     : -1);
		/* Translate the 16-bit capabilities into the 32-bit format
		 * used everywhere else in the driver.
		 */
		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
	} else {
		u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);

		port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
		mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F)
			     ? FW_PORT_CMD_MDIOADDR32_G(lstatus32)
			     : -1);
		pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
		acaps = be32_to_cpu(cmd.u.info32.acaps32);
	}

	/* Allocate the Virtual Interface; on success the return value is
	 * the new VI's id.
	 */
	ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size,
			  &vivld, &vin);
	if (ret < 0)
		return ret;

	pi->viid = ret;
	pi->tx_chan = port;
	pi->lport = port;
	pi->rss_size = rss_size;
	pi->rx_cchan = t4_get_tp_e2c_map(pi->adapter, port);

	/* If fw supports returning the VIN as part of FW_VI_CMD,
	 * save the returned values.
	 */
	if (adapter->params.viid_smt_extn_support) {
		pi->vivld = vivld;
		pi->vin = vin;
	} else {
		/* Retrieve the values from VIID */
		pi->vivld = FW_VIID_VIVLD_G(pi->viid);
		pi->vin = FW_VIID_VIN_G(pi->viid);
	}

	pi->port_type = port_type;
	pi->mdio_addr = mdio_addr;
	/* Module type starts out as "not applicable" -- presumably updated
	 * later by port/module change notifications; confirm against the
	 * link-state handling code.
	 */
	pi->mod_type = FW_PORT_MOD_TYPE_NA;

	init_link_config(&pi->link_cfg, pcaps, acaps);
	return 0;
}
9724*4882a593Smuzhiyun
t4_port_init(struct adapter * adap,int mbox,int pf,int vf)9725*4882a593Smuzhiyun int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
9726*4882a593Smuzhiyun {
9727*4882a593Smuzhiyun u8 addr[6];
9728*4882a593Smuzhiyun int ret, i, j = 0;
9729*4882a593Smuzhiyun
9730*4882a593Smuzhiyun for_each_port(adap, i) {
9731*4882a593Smuzhiyun struct port_info *pi = adap2pinfo(adap, i);
9732*4882a593Smuzhiyun
9733*4882a593Smuzhiyun while ((adap->params.portvec & (1 << j)) == 0)
9734*4882a593Smuzhiyun j++;
9735*4882a593Smuzhiyun
9736*4882a593Smuzhiyun ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
9737*4882a593Smuzhiyun if (ret)
9738*4882a593Smuzhiyun return ret;
9739*4882a593Smuzhiyun
9740*4882a593Smuzhiyun memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
9741*4882a593Smuzhiyun j++;
9742*4882a593Smuzhiyun }
9743*4882a593Smuzhiyun return 0;
9744*4882a593Smuzhiyun }
9745*4882a593Smuzhiyun
/* Allocate a Virtual Interface on @port for receiving mirrored traffic.
 * On success the new VI's id is stored through @mirror_viid (if non-NULL)
 * and 0 is returned; otherwise the negative error from t4_alloc_vi() is
 * returned.
 */
int t4_init_port_mirror(struct port_info *pi, u8 mbox, u8 port, u8 pf, u8 vf,
			u16 *mirror_viid)
{
	int vi;

	/* We don't need the MAC, RSS size or VIN details for a mirror VI,
	 * so pass NULL for all of the out-parameters.
	 */
	vi = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL,
			 NULL, NULL);
	if (vi < 0)
		return vi;

	if (mirror_viid)
		*mirror_viid = vi;

	return 0;
}
9761*4882a593Smuzhiyun
9762*4882a593Smuzhiyun /**
9763*4882a593Smuzhiyun * t4_read_cimq_cfg - read CIM queue configuration
9764*4882a593Smuzhiyun * @adap: the adapter
9765*4882a593Smuzhiyun * @base: holds the queue base addresses in bytes
9766*4882a593Smuzhiyun * @size: holds the queue sizes in bytes
9767*4882a593Smuzhiyun * @thres: holds the queue full thresholds in bytes
9768*4882a593Smuzhiyun *
9769*4882a593Smuzhiyun * Returns the current configuration of the CIM queues, starting with
9770*4882a593Smuzhiyun * the IBQs, then the OBQs.
9771*4882a593Smuzhiyun */
t4_read_cimq_cfg(struct adapter * adap,u16 * base,u16 * size,u16 * thres)9772*4882a593Smuzhiyun void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
9773*4882a593Smuzhiyun {
9774*4882a593Smuzhiyun unsigned int i, v;
9775*4882a593Smuzhiyun int cim_num_obq = is_t4(adap->params.chip) ?
9776*4882a593Smuzhiyun CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9777*4882a593Smuzhiyun
9778*4882a593Smuzhiyun for (i = 0; i < CIM_NUM_IBQ; i++) {
9779*4882a593Smuzhiyun t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
9780*4882a593Smuzhiyun QUENUMSELECT_V(i));
9781*4882a593Smuzhiyun v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9782*4882a593Smuzhiyun /* value is in 256-byte units */
9783*4882a593Smuzhiyun *base++ = CIMQBASE_G(v) * 256;
9784*4882a593Smuzhiyun *size++ = CIMQSIZE_G(v) * 256;
9785*4882a593Smuzhiyun *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
9786*4882a593Smuzhiyun }
9787*4882a593Smuzhiyun for (i = 0; i < cim_num_obq; i++) {
9788*4882a593Smuzhiyun t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9789*4882a593Smuzhiyun QUENUMSELECT_V(i));
9790*4882a593Smuzhiyun v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9791*4882a593Smuzhiyun /* value is in 256-byte units */
9792*4882a593Smuzhiyun *base++ = CIMQBASE_G(v) * 256;
9793*4882a593Smuzhiyun *size++ = CIMQSIZE_G(v) * 256;
9794*4882a593Smuzhiyun }
9795*4882a593Smuzhiyun }
9796*4882a593Smuzhiyun
9797*4882a593Smuzhiyun /**
9798*4882a593Smuzhiyun * t4_read_cim_ibq - read the contents of a CIM inbound queue
9799*4882a593Smuzhiyun * @adap: the adapter
9800*4882a593Smuzhiyun * @qid: the queue index
9801*4882a593Smuzhiyun * @data: where to store the queue contents
9802*4882a593Smuzhiyun * @n: capacity of @data in 32-bit words
9803*4882a593Smuzhiyun *
9804*4882a593Smuzhiyun * Reads the contents of the selected CIM queue starting at address 0 up
9805*4882a593Smuzhiyun * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9806*4882a593Smuzhiyun * error and the number of 32-bit words actually read on success.
9807*4882a593Smuzhiyun */
t4_read_cim_ibq(struct adapter * adap,unsigned int qid,u32 * data,size_t n)9808*4882a593Smuzhiyun int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9809*4882a593Smuzhiyun {
9810*4882a593Smuzhiyun int i, err, attempts;
9811*4882a593Smuzhiyun unsigned int addr;
9812*4882a593Smuzhiyun const unsigned int nwords = CIM_IBQ_SIZE * 4;
9813*4882a593Smuzhiyun
9814*4882a593Smuzhiyun if (qid > 5 || (n & 3))
9815*4882a593Smuzhiyun return -EINVAL;
9816*4882a593Smuzhiyun
9817*4882a593Smuzhiyun addr = qid * nwords;
9818*4882a593Smuzhiyun if (n > nwords)
9819*4882a593Smuzhiyun n = nwords;
9820*4882a593Smuzhiyun
9821*4882a593Smuzhiyun /* It might take 3-10ms before the IBQ debug read access is allowed.
9822*4882a593Smuzhiyun * Wait for 1 Sec with a delay of 1 usec.
9823*4882a593Smuzhiyun */
9824*4882a593Smuzhiyun attempts = 1000000;
9825*4882a593Smuzhiyun
9826*4882a593Smuzhiyun for (i = 0; i < n; i++, addr++) {
9827*4882a593Smuzhiyun t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
9828*4882a593Smuzhiyun IBQDBGEN_F);
9829*4882a593Smuzhiyun err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
9830*4882a593Smuzhiyun attempts, 1);
9831*4882a593Smuzhiyun if (err)
9832*4882a593Smuzhiyun return err;
9833*4882a593Smuzhiyun *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
9834*4882a593Smuzhiyun }
9835*4882a593Smuzhiyun t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
9836*4882a593Smuzhiyun return i;
9837*4882a593Smuzhiyun }
9838*4882a593Smuzhiyun
9839*4882a593Smuzhiyun /**
9840*4882a593Smuzhiyun * t4_read_cim_obq - read the contents of a CIM outbound queue
9841*4882a593Smuzhiyun * @adap: the adapter
9842*4882a593Smuzhiyun * @qid: the queue index
9843*4882a593Smuzhiyun * @data: where to store the queue contents
9844*4882a593Smuzhiyun * @n: capacity of @data in 32-bit words
9845*4882a593Smuzhiyun *
9846*4882a593Smuzhiyun * Reads the contents of the selected CIM queue starting at address 0 up
9847*4882a593Smuzhiyun * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9848*4882a593Smuzhiyun * error and the number of 32-bit words actually read on success.
9849*4882a593Smuzhiyun */
t4_read_cim_obq(struct adapter * adap,unsigned int qid,u32 * data,size_t n)9850*4882a593Smuzhiyun int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9851*4882a593Smuzhiyun {
9852*4882a593Smuzhiyun int i, err;
9853*4882a593Smuzhiyun unsigned int addr, v, nwords;
9854*4882a593Smuzhiyun int cim_num_obq = is_t4(adap->params.chip) ?
9855*4882a593Smuzhiyun CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9856*4882a593Smuzhiyun
9857*4882a593Smuzhiyun if ((qid > (cim_num_obq - 1)) || (n & 3))
9858*4882a593Smuzhiyun return -EINVAL;
9859*4882a593Smuzhiyun
9860*4882a593Smuzhiyun t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9861*4882a593Smuzhiyun QUENUMSELECT_V(qid));
9862*4882a593Smuzhiyun v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9863*4882a593Smuzhiyun
9864*4882a593Smuzhiyun addr = CIMQBASE_G(v) * 64; /* muliple of 256 -> muliple of 4 */
9865*4882a593Smuzhiyun nwords = CIMQSIZE_G(v) * 64; /* same */
9866*4882a593Smuzhiyun if (n > nwords)
9867*4882a593Smuzhiyun n = nwords;
9868*4882a593Smuzhiyun
9869*4882a593Smuzhiyun for (i = 0; i < n; i++, addr++) {
9870*4882a593Smuzhiyun t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
9871*4882a593Smuzhiyun OBQDBGEN_F);
9872*4882a593Smuzhiyun err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
9873*4882a593Smuzhiyun 2, 1);
9874*4882a593Smuzhiyun if (err)
9875*4882a593Smuzhiyun return err;
9876*4882a593Smuzhiyun *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
9877*4882a593Smuzhiyun }
9878*4882a593Smuzhiyun t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
9879*4882a593Smuzhiyun return i;
9880*4882a593Smuzhiyun }
9881*4882a593Smuzhiyun
9882*4882a593Smuzhiyun /**
9883*4882a593Smuzhiyun * t4_cim_read - read a block from CIM internal address space
9884*4882a593Smuzhiyun * @adap: the adapter
9885*4882a593Smuzhiyun * @addr: the start address within the CIM address space
9886*4882a593Smuzhiyun * @n: number of words to read
9887*4882a593Smuzhiyun * @valp: where to store the result
9888*4882a593Smuzhiyun *
9889*4882a593Smuzhiyun * Reads a block of 4-byte words from the CIM intenal address space.
9890*4882a593Smuzhiyun */
t4_cim_read(struct adapter * adap,unsigned int addr,unsigned int n,unsigned int * valp)9891*4882a593Smuzhiyun int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
9892*4882a593Smuzhiyun unsigned int *valp)
9893*4882a593Smuzhiyun {
9894*4882a593Smuzhiyun int ret = 0;
9895*4882a593Smuzhiyun
9896*4882a593Smuzhiyun if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9897*4882a593Smuzhiyun return -EBUSY;
9898*4882a593Smuzhiyun
9899*4882a593Smuzhiyun for ( ; !ret && n--; addr += 4) {
9900*4882a593Smuzhiyun t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
9901*4882a593Smuzhiyun ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
9902*4882a593Smuzhiyun 0, 5, 2);
9903*4882a593Smuzhiyun if (!ret)
9904*4882a593Smuzhiyun *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
9905*4882a593Smuzhiyun }
9906*4882a593Smuzhiyun return ret;
9907*4882a593Smuzhiyun }
9908*4882a593Smuzhiyun
9909*4882a593Smuzhiyun /**
9910*4882a593Smuzhiyun * t4_cim_write - write a block into CIM internal address space
9911*4882a593Smuzhiyun * @adap: the adapter
9912*4882a593Smuzhiyun * @addr: the start address within the CIM address space
9913*4882a593Smuzhiyun * @n: number of words to write
9914*4882a593Smuzhiyun * @valp: set of values to write
9915*4882a593Smuzhiyun *
9916*4882a593Smuzhiyun * Writes a block of 4-byte words into the CIM intenal address space.
9917*4882a593Smuzhiyun */
t4_cim_write(struct adapter * adap,unsigned int addr,unsigned int n,const unsigned int * valp)9918*4882a593Smuzhiyun int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9919*4882a593Smuzhiyun const unsigned int *valp)
9920*4882a593Smuzhiyun {
9921*4882a593Smuzhiyun int ret = 0;
9922*4882a593Smuzhiyun
9923*4882a593Smuzhiyun if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9924*4882a593Smuzhiyun return -EBUSY;
9925*4882a593Smuzhiyun
9926*4882a593Smuzhiyun for ( ; !ret && n--; addr += 4) {
9927*4882a593Smuzhiyun t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
9928*4882a593Smuzhiyun t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
9929*4882a593Smuzhiyun ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
9930*4882a593Smuzhiyun 0, 5, 2);
9931*4882a593Smuzhiyun }
9932*4882a593Smuzhiyun return ret;
9933*4882a593Smuzhiyun }
9934*4882a593Smuzhiyun
/* Convenience wrapper: write a single 4-byte word into CIM address space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	unsigned int word = val;

	return t4_cim_write(adap, addr, 1, &word);
}
9940*4882a593Smuzhiyun
9941*4882a593Smuzhiyun /**
9942*4882a593Smuzhiyun * t4_cim_read_la - read CIM LA capture buffer
9943*4882a593Smuzhiyun * @adap: the adapter
9944*4882a593Smuzhiyun * @la_buf: where to store the LA data
9945*4882a593Smuzhiyun * @wrptr: the HW write pointer within the capture buffer
9946*4882a593Smuzhiyun *
9947*4882a593Smuzhiyun * Reads the contents of the CIM LA buffer with the most recent entry at
9948*4882a593Smuzhiyun * the end of the returned data and with the entry at @wrptr first.
9949*4882a593Smuzhiyun * We try to leave the LA in the running state we find it in.
9950*4882a593Smuzhiyun */
t4_cim_read_la(struct adapter * adap,u32 * la_buf,unsigned int * wrptr)9951*4882a593Smuzhiyun int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
9952*4882a593Smuzhiyun {
9953*4882a593Smuzhiyun int i, ret;
9954*4882a593Smuzhiyun unsigned int cfg, val, idx;
9955*4882a593Smuzhiyun
9956*4882a593Smuzhiyun ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
9957*4882a593Smuzhiyun if (ret)
9958*4882a593Smuzhiyun return ret;
9959*4882a593Smuzhiyun
9960*4882a593Smuzhiyun if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */
9961*4882a593Smuzhiyun ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
9962*4882a593Smuzhiyun if (ret)
9963*4882a593Smuzhiyun return ret;
9964*4882a593Smuzhiyun }
9965*4882a593Smuzhiyun
9966*4882a593Smuzhiyun ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9967*4882a593Smuzhiyun if (ret)
9968*4882a593Smuzhiyun goto restart;
9969*4882a593Smuzhiyun
9970*4882a593Smuzhiyun idx = UPDBGLAWRPTR_G(val);
9971*4882a593Smuzhiyun if (wrptr)
9972*4882a593Smuzhiyun *wrptr = idx;
9973*4882a593Smuzhiyun
9974*4882a593Smuzhiyun for (i = 0; i < adap->params.cim_la_size; i++) {
9975*4882a593Smuzhiyun ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
9976*4882a593Smuzhiyun UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
9977*4882a593Smuzhiyun if (ret)
9978*4882a593Smuzhiyun break;
9979*4882a593Smuzhiyun ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9980*4882a593Smuzhiyun if (ret)
9981*4882a593Smuzhiyun break;
9982*4882a593Smuzhiyun if (val & UPDBGLARDEN_F) {
9983*4882a593Smuzhiyun ret = -ETIMEDOUT;
9984*4882a593Smuzhiyun break;
9985*4882a593Smuzhiyun }
9986*4882a593Smuzhiyun ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
9987*4882a593Smuzhiyun if (ret)
9988*4882a593Smuzhiyun break;
9989*4882a593Smuzhiyun
9990*4882a593Smuzhiyun /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
9991*4882a593Smuzhiyun * identify the 32-bit portion of the full 312-bit data
9992*4882a593Smuzhiyun */
9993*4882a593Smuzhiyun if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
9994*4882a593Smuzhiyun idx = (idx & 0xff0) + 0x10;
9995*4882a593Smuzhiyun else
9996*4882a593Smuzhiyun idx++;
9997*4882a593Smuzhiyun /* address can't exceed 0xfff */
9998*4882a593Smuzhiyun idx &= UPDBGLARDPTR_M;
9999*4882a593Smuzhiyun }
10000*4882a593Smuzhiyun restart:
10001*4882a593Smuzhiyun if (cfg & UPDBGLAEN_F) {
10002*4882a593Smuzhiyun int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
10003*4882a593Smuzhiyun cfg & ~UPDBGLARDEN_F);
10004*4882a593Smuzhiyun if (!ret)
10005*4882a593Smuzhiyun ret = r;
10006*4882a593Smuzhiyun }
10007*4882a593Smuzhiyun return ret;
10008*4882a593Smuzhiyun }
10009*4882a593Smuzhiyun
10010*4882a593Smuzhiyun /**
10011*4882a593Smuzhiyun * t4_tp_read_la - read TP LA capture buffer
10012*4882a593Smuzhiyun * @adap: the adapter
10013*4882a593Smuzhiyun * @la_buf: where to store the LA data
10014*4882a593Smuzhiyun * @wrptr: the HW write pointer within the capture buffer
10015*4882a593Smuzhiyun *
10016*4882a593Smuzhiyun * Reads the contents of the TP LA buffer with the most recent entry at
10017*4882a593Smuzhiyun * the end of the returned data and with the entry at @wrptr first.
10018*4882a593Smuzhiyun * We leave the LA in the running state we find it in.
10019*4882a593Smuzhiyun */
t4_tp_read_la(struct adapter * adap,u64 * la_buf,unsigned int * wrptr)10020*4882a593Smuzhiyun void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
10021*4882a593Smuzhiyun {
10022*4882a593Smuzhiyun bool last_incomplete;
10023*4882a593Smuzhiyun unsigned int i, cfg, val, idx;
10024*4882a593Smuzhiyun
10025*4882a593Smuzhiyun cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
10026*4882a593Smuzhiyun if (cfg & DBGLAENABLE_F) /* freeze LA */
10027*4882a593Smuzhiyun t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
10028*4882a593Smuzhiyun adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
10029*4882a593Smuzhiyun
10030*4882a593Smuzhiyun val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
10031*4882a593Smuzhiyun idx = DBGLAWPTR_G(val);
10032*4882a593Smuzhiyun last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
10033*4882a593Smuzhiyun if (last_incomplete)
10034*4882a593Smuzhiyun idx = (idx + 1) & DBGLARPTR_M;
10035*4882a593Smuzhiyun if (wrptr)
10036*4882a593Smuzhiyun *wrptr = idx;
10037*4882a593Smuzhiyun
10038*4882a593Smuzhiyun val &= 0xffff;
10039*4882a593Smuzhiyun val &= ~DBGLARPTR_V(DBGLARPTR_M);
10040*4882a593Smuzhiyun val |= adap->params.tp.la_mask;
10041*4882a593Smuzhiyun
10042*4882a593Smuzhiyun for (i = 0; i < TPLA_SIZE; i++) {
10043*4882a593Smuzhiyun t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
10044*4882a593Smuzhiyun la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
10045*4882a593Smuzhiyun idx = (idx + 1) & DBGLARPTR_M;
10046*4882a593Smuzhiyun }
10047*4882a593Smuzhiyun
10048*4882a593Smuzhiyun /* Wipe out last entry if it isn't valid */
10049*4882a593Smuzhiyun if (last_incomplete)
10050*4882a593Smuzhiyun la_buf[TPLA_SIZE - 1] = ~0ULL;
10051*4882a593Smuzhiyun
10052*4882a593Smuzhiyun if (cfg & DBGLAENABLE_F) /* restore running state */
10053*4882a593Smuzhiyun t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
10054*4882a593Smuzhiyun cfg | adap->params.tp.la_mask);
10055*4882a593Smuzhiyun }
10056*4882a593Smuzhiyun
10057*4882a593Smuzhiyun /* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
10058*4882a593Smuzhiyun * seconds). If we find one of the SGE Ingress DMA State Machines in the same
10059*4882a593Smuzhiyun * state for more than the Warning Threshold then we'll issue a warning about
10060*4882a593Smuzhiyun * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
10061*4882a593Smuzhiyun * appears to be hung every Warning Repeat second till the situation clears.
10062*4882a593Smuzhiyun * If the situation clears, we'll note that as well.
10063*4882a593Smuzhiyun */
10064*4882a593Smuzhiyun #define SGE_IDMA_WARN_THRESH 1
10065*4882a593Smuzhiyun #define SGE_IDMA_WARN_REPEAT 300
10066*4882a593Smuzhiyun
10067*4882a593Smuzhiyun /**
10068*4882a593Smuzhiyun * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
10069*4882a593Smuzhiyun * @adapter: the adapter
10070*4882a593Smuzhiyun * @idma: the adapter IDMA Monitor state
10071*4882a593Smuzhiyun *
10072*4882a593Smuzhiyun * Initialize the state of an SGE Ingress DMA Monitor.
10073*4882a593Smuzhiyun */
t4_idma_monitor_init(struct adapter * adapter,struct sge_idma_monitor_state * idma)10074*4882a593Smuzhiyun void t4_idma_monitor_init(struct adapter *adapter,
10075*4882a593Smuzhiyun struct sge_idma_monitor_state *idma)
10076*4882a593Smuzhiyun {
10077*4882a593Smuzhiyun /* Initialize the state variables for detecting an SGE Ingress DMA
10078*4882a593Smuzhiyun * hang. The SGE has internal counters which count up on each clock
10079*4882a593Smuzhiyun * tick whenever the SGE finds its Ingress DMA State Engines in the
10080*4882a593Smuzhiyun * same state they were on the previous clock tick. The clock used is
10081*4882a593Smuzhiyun * the Core Clock so we have a limit on the maximum "time" they can
10082*4882a593Smuzhiyun * record; typically a very small number of seconds. For instance,
10083*4882a593Smuzhiyun * with a 600MHz Core Clock, we can only count up to a bit more than
10084*4882a593Smuzhiyun * 7s. So we'll synthesize a larger counter in order to not run the
10085*4882a593Smuzhiyun * risk of having the "timers" overflow and give us the flexibility to
10086*4882a593Smuzhiyun * maintain a Hung SGE State Machine of our own which operates across
10087*4882a593Smuzhiyun * a longer time frame.
10088*4882a593Smuzhiyun */
10089*4882a593Smuzhiyun idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
10090*4882a593Smuzhiyun idma->idma_stalled[0] = 0;
10091*4882a593Smuzhiyun idma->idma_stalled[1] = 0;
10092*4882a593Smuzhiyun }
10093*4882a593Smuzhiyun
10094*4882a593Smuzhiyun /**
10095*4882a593Smuzhiyun * t4_idma_monitor - monitor SGE Ingress DMA state
10096*4882a593Smuzhiyun * @adapter: the adapter
10097*4882a593Smuzhiyun * @idma: the adapter IDMA Monitor state
10098*4882a593Smuzhiyun * @hz: number of ticks/second
10099*4882a593Smuzhiyun * @ticks: number of ticks since the last IDMA Monitor call
10100*4882a593Smuzhiyun */
t4_idma_monitor(struct adapter * adapter,struct sge_idma_monitor_state * idma,int hz,int ticks)10101*4882a593Smuzhiyun void t4_idma_monitor(struct adapter *adapter,
10102*4882a593Smuzhiyun struct sge_idma_monitor_state *idma,
10103*4882a593Smuzhiyun int hz, int ticks)
10104*4882a593Smuzhiyun {
10105*4882a593Smuzhiyun int i, idma_same_state_cnt[2];
10106*4882a593Smuzhiyun
10107*4882a593Smuzhiyun /* Read the SGE Debug Ingress DMA Same State Count registers. These
10108*4882a593Smuzhiyun * are counters inside the SGE which count up on each clock when the
10109*4882a593Smuzhiyun * SGE finds its Ingress DMA State Engines in the same states they
10110*4882a593Smuzhiyun * were in the previous clock. The counters will peg out at
10111*4882a593Smuzhiyun * 0xffffffff without wrapping around so once they pass the 1s
10112*4882a593Smuzhiyun * threshold they'll stay above that till the IDMA state changes.
10113*4882a593Smuzhiyun */
10114*4882a593Smuzhiyun t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
10115*4882a593Smuzhiyun idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
10116*4882a593Smuzhiyun idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
10117*4882a593Smuzhiyun
10118*4882a593Smuzhiyun for (i = 0; i < 2; i++) {
10119*4882a593Smuzhiyun u32 debug0, debug11;
10120*4882a593Smuzhiyun
10121*4882a593Smuzhiyun /* If the Ingress DMA Same State Counter ("timer") is less
10122*4882a593Smuzhiyun * than 1s, then we can reset our synthesized Stall Timer and
10123*4882a593Smuzhiyun * continue. If we have previously emitted warnings about a
10124*4882a593Smuzhiyun * potential stalled Ingress Queue, issue a note indicating
10125*4882a593Smuzhiyun * that the Ingress Queue has resumed forward progress.
10126*4882a593Smuzhiyun */
10127*4882a593Smuzhiyun if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
10128*4882a593Smuzhiyun if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
10129*4882a593Smuzhiyun dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
10130*4882a593Smuzhiyun "resumed after %d seconds\n",
10131*4882a593Smuzhiyun i, idma->idma_qid[i],
10132*4882a593Smuzhiyun idma->idma_stalled[i] / hz);
10133*4882a593Smuzhiyun idma->idma_stalled[i] = 0;
10134*4882a593Smuzhiyun continue;
10135*4882a593Smuzhiyun }
10136*4882a593Smuzhiyun
10137*4882a593Smuzhiyun /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
10138*4882a593Smuzhiyun * domain. The first time we get here it'll be because we
10139*4882a593Smuzhiyun * passed the 1s Threshold; each additional time it'll be
10140*4882a593Smuzhiyun * because the RX Timer Callback is being fired on its regular
10141*4882a593Smuzhiyun * schedule.
10142*4882a593Smuzhiyun *
10143*4882a593Smuzhiyun * If the stall is below our Potential Hung Ingress Queue
10144*4882a593Smuzhiyun * Warning Threshold, continue.
10145*4882a593Smuzhiyun */
10146*4882a593Smuzhiyun if (idma->idma_stalled[i] == 0) {
10147*4882a593Smuzhiyun idma->idma_stalled[i] = hz;
10148*4882a593Smuzhiyun idma->idma_warn[i] = 0;
10149*4882a593Smuzhiyun } else {
10150*4882a593Smuzhiyun idma->idma_stalled[i] += ticks;
10151*4882a593Smuzhiyun idma->idma_warn[i] -= ticks;
10152*4882a593Smuzhiyun }
10153*4882a593Smuzhiyun
10154*4882a593Smuzhiyun if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
10155*4882a593Smuzhiyun continue;
10156*4882a593Smuzhiyun
10157*4882a593Smuzhiyun /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
10158*4882a593Smuzhiyun */
10159*4882a593Smuzhiyun if (idma->idma_warn[i] > 0)
10160*4882a593Smuzhiyun continue;
10161*4882a593Smuzhiyun idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
10162*4882a593Smuzhiyun
10163*4882a593Smuzhiyun /* Read and save the SGE IDMA State and Queue ID information.
10164*4882a593Smuzhiyun * We do this every time in case it changes across time ...
10165*4882a593Smuzhiyun * can't be too careful ...
10166*4882a593Smuzhiyun */
10167*4882a593Smuzhiyun t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
10168*4882a593Smuzhiyun debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
10169*4882a593Smuzhiyun idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
10170*4882a593Smuzhiyun
10171*4882a593Smuzhiyun t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
10172*4882a593Smuzhiyun debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
10173*4882a593Smuzhiyun idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
10174*4882a593Smuzhiyun
10175*4882a593Smuzhiyun dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
10176*4882a593Smuzhiyun "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
10177*4882a593Smuzhiyun i, idma->idma_qid[i], idma->idma_state[i],
10178*4882a593Smuzhiyun idma->idma_stalled[i] / hz,
10179*4882a593Smuzhiyun debug0, debug11);
10180*4882a593Smuzhiyun t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
10181*4882a593Smuzhiyun }
10182*4882a593Smuzhiyun }
10183*4882a593Smuzhiyun
10184*4882a593Smuzhiyun /**
10185*4882a593Smuzhiyun * t4_load_cfg - download config file
10186*4882a593Smuzhiyun * @adap: the adapter
10187*4882a593Smuzhiyun * @cfg_data: the cfg text file to write
10188*4882a593Smuzhiyun * @size: text file size
10189*4882a593Smuzhiyun *
10190*4882a593Smuzhiyun * Write the supplied config text file to the card's serial flash.
10191*4882a593Smuzhiyun */
t4_load_cfg(struct adapter * adap,const u8 * cfg_data,unsigned int size)10192*4882a593Smuzhiyun int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
10193*4882a593Smuzhiyun {
10194*4882a593Smuzhiyun int ret, i, n, cfg_addr;
10195*4882a593Smuzhiyun unsigned int addr;
10196*4882a593Smuzhiyun unsigned int flash_cfg_start_sec;
10197*4882a593Smuzhiyun unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10198*4882a593Smuzhiyun
10199*4882a593Smuzhiyun cfg_addr = t4_flash_cfg_addr(adap);
10200*4882a593Smuzhiyun if (cfg_addr < 0)
10201*4882a593Smuzhiyun return cfg_addr;
10202*4882a593Smuzhiyun
10203*4882a593Smuzhiyun addr = cfg_addr;
10204*4882a593Smuzhiyun flash_cfg_start_sec = addr / SF_SEC_SIZE;
10205*4882a593Smuzhiyun
10206*4882a593Smuzhiyun if (size > FLASH_CFG_MAX_SIZE) {
10207*4882a593Smuzhiyun dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
10208*4882a593Smuzhiyun FLASH_CFG_MAX_SIZE);
10209*4882a593Smuzhiyun return -EFBIG;
10210*4882a593Smuzhiyun }
10211*4882a593Smuzhiyun
10212*4882a593Smuzhiyun i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
10213*4882a593Smuzhiyun sf_sec_size);
10214*4882a593Smuzhiyun ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10215*4882a593Smuzhiyun flash_cfg_start_sec + i - 1);
10216*4882a593Smuzhiyun /* If size == 0 then we're simply erasing the FLASH sectors associated
10217*4882a593Smuzhiyun * with the on-adapter Firmware Configuration File.
10218*4882a593Smuzhiyun */
10219*4882a593Smuzhiyun if (ret || size == 0)
10220*4882a593Smuzhiyun goto out;
10221*4882a593Smuzhiyun
10222*4882a593Smuzhiyun /* this will write to the flash up to SF_PAGE_SIZE at a time */
10223*4882a593Smuzhiyun for (i = 0; i < size; i += SF_PAGE_SIZE) {
10224*4882a593Smuzhiyun if ((size - i) < SF_PAGE_SIZE)
10225*4882a593Smuzhiyun n = size - i;
10226*4882a593Smuzhiyun else
10227*4882a593Smuzhiyun n = SF_PAGE_SIZE;
10228*4882a593Smuzhiyun ret = t4_write_flash(adap, addr, n, cfg_data, true);
10229*4882a593Smuzhiyun if (ret)
10230*4882a593Smuzhiyun goto out;
10231*4882a593Smuzhiyun
10232*4882a593Smuzhiyun addr += SF_PAGE_SIZE;
10233*4882a593Smuzhiyun cfg_data += SF_PAGE_SIZE;
10234*4882a593Smuzhiyun }
10235*4882a593Smuzhiyun
10236*4882a593Smuzhiyun out:
10237*4882a593Smuzhiyun if (ret)
10238*4882a593Smuzhiyun dev_err(adap->pdev_dev, "config file %s failed %d\n",
10239*4882a593Smuzhiyun (size == 0 ? "clear" : "download"), ret);
10240*4882a593Smuzhiyun return ret;
10241*4882a593Smuzhiyun }
10242*4882a593Smuzhiyun
10243*4882a593Smuzhiyun /**
10244*4882a593Smuzhiyun * t4_set_vf_mac - Set MAC address for the specified VF
10245*4882a593Smuzhiyun * @adapter: The adapter
10246*4882a593Smuzhiyun * @vf: one of the VFs instantiated by the specified PF
10247*4882a593Smuzhiyun * @naddr: the number of MAC addresses
10248*4882a593Smuzhiyun * @addr: the MAC address(es) to be set to the specified VF
10249*4882a593Smuzhiyun */
t4_set_vf_mac_acl(struct adapter * adapter,unsigned int vf,unsigned int naddr,u8 * addr)10250*4882a593Smuzhiyun int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
10251*4882a593Smuzhiyun unsigned int naddr, u8 *addr)
10252*4882a593Smuzhiyun {
10253*4882a593Smuzhiyun struct fw_acl_mac_cmd cmd;
10254*4882a593Smuzhiyun
10255*4882a593Smuzhiyun memset(&cmd, 0, sizeof(cmd));
10256*4882a593Smuzhiyun cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
10257*4882a593Smuzhiyun FW_CMD_REQUEST_F |
10258*4882a593Smuzhiyun FW_CMD_WRITE_F |
10259*4882a593Smuzhiyun FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
10260*4882a593Smuzhiyun FW_ACL_MAC_CMD_VFN_V(vf));
10261*4882a593Smuzhiyun
10262*4882a593Smuzhiyun /* Note: Do not enable the ACL */
10263*4882a593Smuzhiyun cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
10264*4882a593Smuzhiyun cmd.nmac = naddr;
10265*4882a593Smuzhiyun
10266*4882a593Smuzhiyun switch (adapter->pf) {
10267*4882a593Smuzhiyun case 3:
10268*4882a593Smuzhiyun memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
10269*4882a593Smuzhiyun break;
10270*4882a593Smuzhiyun case 2:
10271*4882a593Smuzhiyun memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
10272*4882a593Smuzhiyun break;
10273*4882a593Smuzhiyun case 1:
10274*4882a593Smuzhiyun memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
10275*4882a593Smuzhiyun break;
10276*4882a593Smuzhiyun case 0:
10277*4882a593Smuzhiyun memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
10278*4882a593Smuzhiyun break;
10279*4882a593Smuzhiyun }
10280*4882a593Smuzhiyun
10281*4882a593Smuzhiyun return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
10282*4882a593Smuzhiyun }
10283*4882a593Smuzhiyun
10284*4882a593Smuzhiyun /**
10285*4882a593Smuzhiyun * t4_read_pace_tbl - read the pace table
10286*4882a593Smuzhiyun * @adap: the adapter
10287*4882a593Smuzhiyun * @pace_vals: holds the returned values
10288*4882a593Smuzhiyun *
10289*4882a593Smuzhiyun * Returns the values of TP's pace table in microseconds.
10290*4882a593Smuzhiyun */
t4_read_pace_tbl(struct adapter * adap,unsigned int pace_vals[NTX_SCHED])10291*4882a593Smuzhiyun void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
10292*4882a593Smuzhiyun {
10293*4882a593Smuzhiyun unsigned int i, v;
10294*4882a593Smuzhiyun
10295*4882a593Smuzhiyun for (i = 0; i < NTX_SCHED; i++) {
10296*4882a593Smuzhiyun t4_write_reg(adap, TP_PACE_TABLE_A, 0xffff0000 + i);
10297*4882a593Smuzhiyun v = t4_read_reg(adap, TP_PACE_TABLE_A);
10298*4882a593Smuzhiyun pace_vals[i] = dack_ticks_to_usec(adap, v);
10299*4882a593Smuzhiyun }
10300*4882a593Smuzhiyun }
10301*4882a593Smuzhiyun
10302*4882a593Smuzhiyun /**
10303*4882a593Smuzhiyun * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
10304*4882a593Smuzhiyun * @adap: the adapter
10305*4882a593Smuzhiyun * @sched: the scheduler index
10306*4882a593Smuzhiyun * @kbps: the byte rate in Kbps
10307*4882a593Smuzhiyun * @ipg: the interpacket delay in tenths of nanoseconds
10308*4882a593Smuzhiyun * @sleep_ok: if true we may sleep while awaiting command completion
10309*4882a593Smuzhiyun *
10310*4882a593Smuzhiyun * Return the current configuration of a HW Tx scheduler.
10311*4882a593Smuzhiyun */
t4_get_tx_sched(struct adapter * adap,unsigned int sched,unsigned int * kbps,unsigned int * ipg,bool sleep_ok)10312*4882a593Smuzhiyun void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
10313*4882a593Smuzhiyun unsigned int *kbps, unsigned int *ipg, bool sleep_ok)
10314*4882a593Smuzhiyun {
10315*4882a593Smuzhiyun unsigned int v, addr, bpt, cpt;
10316*4882a593Smuzhiyun
10317*4882a593Smuzhiyun if (kbps) {
10318*4882a593Smuzhiyun addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2;
10319*4882a593Smuzhiyun t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10320*4882a593Smuzhiyun if (sched & 1)
10321*4882a593Smuzhiyun v >>= 16;
10322*4882a593Smuzhiyun bpt = (v >> 8) & 0xff;
10323*4882a593Smuzhiyun cpt = v & 0xff;
10324*4882a593Smuzhiyun if (!cpt) {
10325*4882a593Smuzhiyun *kbps = 0; /* scheduler disabled */
10326*4882a593Smuzhiyun } else {
10327*4882a593Smuzhiyun v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
10328*4882a593Smuzhiyun *kbps = (v * bpt) / 125;
10329*4882a593Smuzhiyun }
10330*4882a593Smuzhiyun }
10331*4882a593Smuzhiyun if (ipg) {
10332*4882a593Smuzhiyun addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2;
10333*4882a593Smuzhiyun t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10334*4882a593Smuzhiyun if (sched & 1)
10335*4882a593Smuzhiyun v >>= 16;
10336*4882a593Smuzhiyun v &= 0xffff;
10337*4882a593Smuzhiyun *ipg = (10000 * v) / core_ticks_per_usec(adap);
10338*4882a593Smuzhiyun }
10339*4882a593Smuzhiyun }
10340*4882a593Smuzhiyun
10341*4882a593Smuzhiyun /* t4_sge_ctxt_rd - read an SGE context through FW
10342*4882a593Smuzhiyun * @adap: the adapter
10343*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
10344*4882a593Smuzhiyun * @cid: the context id
10345*4882a593Smuzhiyun * @ctype: the context type
10346*4882a593Smuzhiyun * @data: where to store the context data
10347*4882a593Smuzhiyun *
10348*4882a593Smuzhiyun * Issues a FW command through the given mailbox to read an SGE context.
10349*4882a593Smuzhiyun */
t4_sge_ctxt_rd(struct adapter * adap,unsigned int mbox,unsigned int cid,enum ctxt_type ctype,u32 * data)10350*4882a593Smuzhiyun int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
10351*4882a593Smuzhiyun enum ctxt_type ctype, u32 *data)
10352*4882a593Smuzhiyun {
10353*4882a593Smuzhiyun struct fw_ldst_cmd c;
10354*4882a593Smuzhiyun int ret;
10355*4882a593Smuzhiyun
10356*4882a593Smuzhiyun if (ctype == CTXT_FLM)
10357*4882a593Smuzhiyun ret = FW_LDST_ADDRSPC_SGE_FLMC;
10358*4882a593Smuzhiyun else
10359*4882a593Smuzhiyun ret = FW_LDST_ADDRSPC_SGE_CONMC;
10360*4882a593Smuzhiyun
10361*4882a593Smuzhiyun memset(&c, 0, sizeof(c));
10362*4882a593Smuzhiyun c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
10363*4882a593Smuzhiyun FW_CMD_REQUEST_F | FW_CMD_READ_F |
10364*4882a593Smuzhiyun FW_LDST_CMD_ADDRSPACE_V(ret));
10365*4882a593Smuzhiyun c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
10366*4882a593Smuzhiyun c.u.idctxt.physid = cpu_to_be32(cid);
10367*4882a593Smuzhiyun
10368*4882a593Smuzhiyun ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
10369*4882a593Smuzhiyun if (ret == 0) {
10370*4882a593Smuzhiyun data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
10371*4882a593Smuzhiyun data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
10372*4882a593Smuzhiyun data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
10373*4882a593Smuzhiyun data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
10374*4882a593Smuzhiyun data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
10375*4882a593Smuzhiyun data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
10376*4882a593Smuzhiyun }
10377*4882a593Smuzhiyun return ret;
10378*4882a593Smuzhiyun }
10379*4882a593Smuzhiyun
10380*4882a593Smuzhiyun /**
10381*4882a593Smuzhiyun * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
10382*4882a593Smuzhiyun * @adap: the adapter
10383*4882a593Smuzhiyun * @cid: the context id
10384*4882a593Smuzhiyun * @ctype: the context type
10385*4882a593Smuzhiyun * @data: where to store the context data
10386*4882a593Smuzhiyun *
10387*4882a593Smuzhiyun * Reads an SGE context directly, bypassing FW. This is only for
10388*4882a593Smuzhiyun * debugging when FW is unavailable.
10389*4882a593Smuzhiyun */
t4_sge_ctxt_rd_bd(struct adapter * adap,unsigned int cid,enum ctxt_type ctype,u32 * data)10390*4882a593Smuzhiyun int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
10391*4882a593Smuzhiyun enum ctxt_type ctype, u32 *data)
10392*4882a593Smuzhiyun {
10393*4882a593Smuzhiyun int i, ret;
10394*4882a593Smuzhiyun
10395*4882a593Smuzhiyun t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype));
10396*4882a593Smuzhiyun ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1);
10397*4882a593Smuzhiyun if (!ret)
10398*4882a593Smuzhiyun for (i = SGE_CTXT_DATA0_A; i <= SGE_CTXT_DATA5_A; i += 4)
10399*4882a593Smuzhiyun *data++ = t4_read_reg(adap, i);
10400*4882a593Smuzhiyun return ret;
10401*4882a593Smuzhiyun }
10402*4882a593Smuzhiyun
/* Issue an FW_SCHED_CMD configuring one Tx scheduling class.  All rate,
 * weight and size parameters are passed through to firmware unchanged.
 */
int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
		    u8 rateunit, u8 ratemode, u8 channel, u8 class,
		    u32 minrate, u32 maxrate, u16 weight, u16 pktsize,
		    u16 burstsize)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
				      FW_CMD_REQUEST_F |
				      FW_CMD_WRITE_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/* Scheduler class identification ... */
	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = type;
	cmd.u.params.level = level;
	cmd.u.params.mode = mode;
	cmd.u.params.ch = channel;
	cmd.u.params.cl = class;

	/* ... and its rate/size parameters. */
	cmd.u.params.unit = rateunit;
	cmd.u.params.rate = ratemode;
	cmd.u.params.min = cpu_to_be32(minrate);
	cmd.u.params.max = cpu_to_be32(maxrate);
	cmd.u.params.weight = cpu_to_be16(weight);
	cmd.u.params.pktsize = cpu_to_be16(pktsize);
	cmd.u.params.burstsize = cpu_to_be16(burstsize);

	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
			       NULL, 1);
}
10433*4882a593Smuzhiyun
10434*4882a593Smuzhiyun /**
10435*4882a593Smuzhiyun * t4_i2c_rd - read I2C data from adapter
10436*4882a593Smuzhiyun * @adap: the adapter
10437*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
10438*4882a593Smuzhiyun * @port: Port number if per-port device; <0 if not
10439*4882a593Smuzhiyun * @devid: per-port device ID or absolute device ID
10440*4882a593Smuzhiyun * @offset: byte offset into device I2C space
10441*4882a593Smuzhiyun * @len: byte length of I2C space data
10442*4882a593Smuzhiyun * @buf: buffer in which to return I2C data
10443*4882a593Smuzhiyun *
10444*4882a593Smuzhiyun * Reads the I2C data from the indicated device and location.
10445*4882a593Smuzhiyun */
t4_i2c_rd(struct adapter * adap,unsigned int mbox,int port,unsigned int devid,unsigned int offset,unsigned int len,u8 * buf)10446*4882a593Smuzhiyun int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
10447*4882a593Smuzhiyun unsigned int devid, unsigned int offset,
10448*4882a593Smuzhiyun unsigned int len, u8 *buf)
10449*4882a593Smuzhiyun {
10450*4882a593Smuzhiyun struct fw_ldst_cmd ldst_cmd, ldst_rpl;
10451*4882a593Smuzhiyun unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
10452*4882a593Smuzhiyun int ret = 0;
10453*4882a593Smuzhiyun
10454*4882a593Smuzhiyun if (len > I2C_PAGE_SIZE)
10455*4882a593Smuzhiyun return -EINVAL;
10456*4882a593Smuzhiyun
10457*4882a593Smuzhiyun /* Dont allow reads that spans multiple pages */
10458*4882a593Smuzhiyun if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
10459*4882a593Smuzhiyun return -EINVAL;
10460*4882a593Smuzhiyun
10461*4882a593Smuzhiyun memset(&ldst_cmd, 0, sizeof(ldst_cmd));
10462*4882a593Smuzhiyun ldst_cmd.op_to_addrspace =
10463*4882a593Smuzhiyun cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
10464*4882a593Smuzhiyun FW_CMD_REQUEST_F |
10465*4882a593Smuzhiyun FW_CMD_READ_F |
10466*4882a593Smuzhiyun FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C));
10467*4882a593Smuzhiyun ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
10468*4882a593Smuzhiyun ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
10469*4882a593Smuzhiyun ldst_cmd.u.i2c.did = devid;
10470*4882a593Smuzhiyun
10471*4882a593Smuzhiyun while (len > 0) {
10472*4882a593Smuzhiyun unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
10473*4882a593Smuzhiyun
10474*4882a593Smuzhiyun ldst_cmd.u.i2c.boffset = offset;
10475*4882a593Smuzhiyun ldst_cmd.u.i2c.blen = i2c_len;
10476*4882a593Smuzhiyun
10477*4882a593Smuzhiyun ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
10478*4882a593Smuzhiyun &ldst_rpl);
10479*4882a593Smuzhiyun if (ret)
10480*4882a593Smuzhiyun break;
10481*4882a593Smuzhiyun
10482*4882a593Smuzhiyun memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
10483*4882a593Smuzhiyun offset += i2c_len;
10484*4882a593Smuzhiyun buf += i2c_len;
10485*4882a593Smuzhiyun len -= i2c_len;
10486*4882a593Smuzhiyun }
10487*4882a593Smuzhiyun
10488*4882a593Smuzhiyun return ret;
10489*4882a593Smuzhiyun }
10490*4882a593Smuzhiyun
10491*4882a593Smuzhiyun /**
10492*4882a593Smuzhiyun * t4_set_vlan_acl - Set a VLAN id for the specified VF
10493*4882a593Smuzhiyun * @adap: the adapter
10494*4882a593Smuzhiyun * @mbox: mailbox to use for the FW command
10495*4882a593Smuzhiyun * @vf: one of the VFs instantiated by the specified PF
10496*4882a593Smuzhiyun * @vlan: The vlanid to be set
10497*4882a593Smuzhiyun */
t4_set_vlan_acl(struct adapter * adap,unsigned int mbox,unsigned int vf,u16 vlan)10498*4882a593Smuzhiyun int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
10499*4882a593Smuzhiyun u16 vlan)
10500*4882a593Smuzhiyun {
10501*4882a593Smuzhiyun struct fw_acl_vlan_cmd vlan_cmd;
10502*4882a593Smuzhiyun unsigned int enable;
10503*4882a593Smuzhiyun
10504*4882a593Smuzhiyun enable = (vlan ? FW_ACL_VLAN_CMD_EN_F : 0);
10505*4882a593Smuzhiyun memset(&vlan_cmd, 0, sizeof(vlan_cmd));
10506*4882a593Smuzhiyun vlan_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
10507*4882a593Smuzhiyun FW_CMD_REQUEST_F |
10508*4882a593Smuzhiyun FW_CMD_WRITE_F |
10509*4882a593Smuzhiyun FW_CMD_EXEC_F |
10510*4882a593Smuzhiyun FW_ACL_VLAN_CMD_PFN_V(adap->pf) |
10511*4882a593Smuzhiyun FW_ACL_VLAN_CMD_VFN_V(vf));
10512*4882a593Smuzhiyun vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
10513*4882a593Smuzhiyun /* Drop all packets that donot match vlan id */
10514*4882a593Smuzhiyun vlan_cmd.dropnovlan_fm = (enable
10515*4882a593Smuzhiyun ? (FW_ACL_VLAN_CMD_DROPNOVLAN_F |
10516*4882a593Smuzhiyun FW_ACL_VLAN_CMD_FM_F) : 0);
10517*4882a593Smuzhiyun if (enable != 0) {
10518*4882a593Smuzhiyun vlan_cmd.nvlan = 1;
10519*4882a593Smuzhiyun vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
10520*4882a593Smuzhiyun }
10521*4882a593Smuzhiyun
10522*4882a593Smuzhiyun return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
10523*4882a593Smuzhiyun }
10524*4882a593Smuzhiyun
10525*4882a593Smuzhiyun /**
10526*4882a593Smuzhiyun * modify_device_id - Modifies the device ID of the Boot BIOS image
10527*4882a593Smuzhiyun * @device_id: the device ID to write.
10528*4882a593Smuzhiyun * @boot_data: the boot image to modify.
10529*4882a593Smuzhiyun *
10530*4882a593Smuzhiyun * Write the supplied device ID to the boot BIOS image.
10531*4882a593Smuzhiyun */
modify_device_id(int device_id,u8 * boot_data)10532*4882a593Smuzhiyun static void modify_device_id(int device_id, u8 *boot_data)
10533*4882a593Smuzhiyun {
10534*4882a593Smuzhiyun struct cxgb4_pcir_data *pcir_header;
10535*4882a593Smuzhiyun struct legacy_pci_rom_hdr *header;
10536*4882a593Smuzhiyun u8 *cur_header = boot_data;
10537*4882a593Smuzhiyun u16 pcir_offset;
10538*4882a593Smuzhiyun
10539*4882a593Smuzhiyun /* Loop through all chained images and change the device ID's */
10540*4882a593Smuzhiyun do {
10541*4882a593Smuzhiyun header = (struct legacy_pci_rom_hdr *)cur_header;
10542*4882a593Smuzhiyun pcir_offset = le16_to_cpu(header->pcir_offset);
10543*4882a593Smuzhiyun pcir_header = (struct cxgb4_pcir_data *)(cur_header +
10544*4882a593Smuzhiyun pcir_offset);
10545*4882a593Smuzhiyun
10546*4882a593Smuzhiyun /**
10547*4882a593Smuzhiyun * Only modify the Device ID if code type is Legacy or HP.
10548*4882a593Smuzhiyun * 0x00: Okay to modify
10549*4882a593Smuzhiyun * 0x01: FCODE. Do not modify
10550*4882a593Smuzhiyun * 0x03: Okay to modify
10551*4882a593Smuzhiyun * 0x04-0xFF: Do not modify
10552*4882a593Smuzhiyun */
10553*4882a593Smuzhiyun if (pcir_header->code_type == CXGB4_HDR_CODE1) {
10554*4882a593Smuzhiyun u8 csum = 0;
10555*4882a593Smuzhiyun int i;
10556*4882a593Smuzhiyun
10557*4882a593Smuzhiyun /**
10558*4882a593Smuzhiyun * Modify Device ID to match current adatper
10559*4882a593Smuzhiyun */
10560*4882a593Smuzhiyun pcir_header->device_id = cpu_to_le16(device_id);
10561*4882a593Smuzhiyun
10562*4882a593Smuzhiyun /**
10563*4882a593Smuzhiyun * Set checksum temporarily to 0.
10564*4882a593Smuzhiyun * We will recalculate it later.
10565*4882a593Smuzhiyun */
10566*4882a593Smuzhiyun header->cksum = 0x0;
10567*4882a593Smuzhiyun
10568*4882a593Smuzhiyun /**
10569*4882a593Smuzhiyun * Calculate and update checksum
10570*4882a593Smuzhiyun */
10571*4882a593Smuzhiyun for (i = 0; i < (header->size512 * 512); i++)
10572*4882a593Smuzhiyun csum += cur_header[i];
10573*4882a593Smuzhiyun
10574*4882a593Smuzhiyun /**
10575*4882a593Smuzhiyun * Invert summed value to create the checksum
10576*4882a593Smuzhiyun * Writing new checksum value directly to the boot data
10577*4882a593Smuzhiyun */
10578*4882a593Smuzhiyun cur_header[7] = -csum;
10579*4882a593Smuzhiyun
10580*4882a593Smuzhiyun } else if (pcir_header->code_type == CXGB4_HDR_CODE2) {
10581*4882a593Smuzhiyun /**
10582*4882a593Smuzhiyun * Modify Device ID to match current adatper
10583*4882a593Smuzhiyun */
10584*4882a593Smuzhiyun pcir_header->device_id = cpu_to_le16(device_id);
10585*4882a593Smuzhiyun }
10586*4882a593Smuzhiyun
10587*4882a593Smuzhiyun /**
10588*4882a593Smuzhiyun * Move header pointer up to the next image in the ROM.
10589*4882a593Smuzhiyun */
10590*4882a593Smuzhiyun cur_header += header->size512 * 512;
10591*4882a593Smuzhiyun } while (!(pcir_header->indicator & CXGB4_HDR_INDI));
10592*4882a593Smuzhiyun }
10593*4882a593Smuzhiyun
10594*4882a593Smuzhiyun /**
10595*4882a593Smuzhiyun * t4_load_boot - download boot flash
10596*4882a593Smuzhiyun * @adap: the adapter
10597*4882a593Smuzhiyun * @boot_data: the boot image to write
10598*4882a593Smuzhiyun * @boot_addr: offset in flash to write boot_data
10599*4882a593Smuzhiyun * @size: image size
10600*4882a593Smuzhiyun *
10601*4882a593Smuzhiyun * Write the supplied boot image to the card's serial flash.
10602*4882a593Smuzhiyun * The boot image has the following sections: a 28-byte header and the
10603*4882a593Smuzhiyun * boot image.
10604*4882a593Smuzhiyun */
t4_load_boot(struct adapter * adap,u8 * boot_data,unsigned int boot_addr,unsigned int size)10605*4882a593Smuzhiyun int t4_load_boot(struct adapter *adap, u8 *boot_data,
10606*4882a593Smuzhiyun unsigned int boot_addr, unsigned int size)
10607*4882a593Smuzhiyun {
10608*4882a593Smuzhiyun unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10609*4882a593Smuzhiyun unsigned int boot_sector = (boot_addr * 1024);
10610*4882a593Smuzhiyun struct cxgb4_pci_exp_rom_header *header;
10611*4882a593Smuzhiyun struct cxgb4_pcir_data *pcir_header;
10612*4882a593Smuzhiyun int pcir_offset;
10613*4882a593Smuzhiyun unsigned int i;
10614*4882a593Smuzhiyun u16 device_id;
10615*4882a593Smuzhiyun int ret, addr;
10616*4882a593Smuzhiyun
10617*4882a593Smuzhiyun /**
10618*4882a593Smuzhiyun * Make sure the boot image does not encroach on the firmware region
10619*4882a593Smuzhiyun */
10620*4882a593Smuzhiyun if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
10621*4882a593Smuzhiyun dev_err(adap->pdev_dev, "boot image encroaching on firmware region\n");
10622*4882a593Smuzhiyun return -EFBIG;
10623*4882a593Smuzhiyun }
10624*4882a593Smuzhiyun
10625*4882a593Smuzhiyun /* Get boot header */
10626*4882a593Smuzhiyun header = (struct cxgb4_pci_exp_rom_header *)boot_data;
10627*4882a593Smuzhiyun pcir_offset = le16_to_cpu(header->pcir_offset);
10628*4882a593Smuzhiyun /* PCIR Data Structure */
10629*4882a593Smuzhiyun pcir_header = (struct cxgb4_pcir_data *)&boot_data[pcir_offset];
10630*4882a593Smuzhiyun
10631*4882a593Smuzhiyun /**
10632*4882a593Smuzhiyun * Perform some primitive sanity testing to avoid accidentally
10633*4882a593Smuzhiyun * writing garbage over the boot sectors. We ought to check for
10634*4882a593Smuzhiyun * more but it's not worth it for now ...
10635*4882a593Smuzhiyun */
10636*4882a593Smuzhiyun if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
10637*4882a593Smuzhiyun dev_err(adap->pdev_dev, "boot image too small/large\n");
10638*4882a593Smuzhiyun return -EFBIG;
10639*4882a593Smuzhiyun }
10640*4882a593Smuzhiyun
10641*4882a593Smuzhiyun if (le16_to_cpu(header->signature) != BOOT_SIGNATURE) {
10642*4882a593Smuzhiyun dev_err(adap->pdev_dev, "Boot image missing signature\n");
10643*4882a593Smuzhiyun return -EINVAL;
10644*4882a593Smuzhiyun }
10645*4882a593Smuzhiyun
10646*4882a593Smuzhiyun /* Check PCI header signature */
10647*4882a593Smuzhiyun if (le32_to_cpu(pcir_header->signature) != PCIR_SIGNATURE) {
10648*4882a593Smuzhiyun dev_err(adap->pdev_dev, "PCI header missing signature\n");
10649*4882a593Smuzhiyun return -EINVAL;
10650*4882a593Smuzhiyun }
10651*4882a593Smuzhiyun
10652*4882a593Smuzhiyun /* Check Vendor ID matches Chelsio ID*/
10653*4882a593Smuzhiyun if (le16_to_cpu(pcir_header->vendor_id) != PCI_VENDOR_ID_CHELSIO) {
10654*4882a593Smuzhiyun dev_err(adap->pdev_dev, "Vendor ID missing signature\n");
10655*4882a593Smuzhiyun return -EINVAL;
10656*4882a593Smuzhiyun }
10657*4882a593Smuzhiyun
10658*4882a593Smuzhiyun /**
10659*4882a593Smuzhiyun * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
10660*4882a593Smuzhiyun * and Boot configuration data sections. These 3 boot sections span
10661*4882a593Smuzhiyun * sectors 0 to 7 in flash and live right before the FW image location.
10662*4882a593Smuzhiyun */
10663*4882a593Smuzhiyun i = DIV_ROUND_UP(size ? size : FLASH_FW_START, sf_sec_size);
10664*4882a593Smuzhiyun ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
10665*4882a593Smuzhiyun (boot_sector >> 16) + i - 1);
10666*4882a593Smuzhiyun
10667*4882a593Smuzhiyun /**
10668*4882a593Smuzhiyun * If size == 0 then we're simply erasing the FLASH sectors associated
10669*4882a593Smuzhiyun * with the on-adapter option ROM file
10670*4882a593Smuzhiyun */
10671*4882a593Smuzhiyun if (ret || size == 0)
10672*4882a593Smuzhiyun goto out;
10673*4882a593Smuzhiyun /* Retrieve adapter's device ID */
10674*4882a593Smuzhiyun pci_read_config_word(adap->pdev, PCI_DEVICE_ID, &device_id);
10675*4882a593Smuzhiyun /* Want to deal with PF 0 so I strip off PF 4 indicator */
10676*4882a593Smuzhiyun device_id = device_id & 0xf0ff;
10677*4882a593Smuzhiyun
10678*4882a593Smuzhiyun /* Check PCIE Device ID */
10679*4882a593Smuzhiyun if (le16_to_cpu(pcir_header->device_id) != device_id) {
10680*4882a593Smuzhiyun /**
10681*4882a593Smuzhiyun * Change the device ID in the Boot BIOS image to match
10682*4882a593Smuzhiyun * the Device ID of the current adapter.
10683*4882a593Smuzhiyun */
10684*4882a593Smuzhiyun modify_device_id(device_id, boot_data);
10685*4882a593Smuzhiyun }
10686*4882a593Smuzhiyun
10687*4882a593Smuzhiyun /**
10688*4882a593Smuzhiyun * Skip over the first SF_PAGE_SIZE worth of data and write it after
10689*4882a593Smuzhiyun * we finish copying the rest of the boot image. This will ensure
10690*4882a593Smuzhiyun * that the BIOS boot header will only be written if the boot image
10691*4882a593Smuzhiyun * was written in full.
10692*4882a593Smuzhiyun */
10693*4882a593Smuzhiyun addr = boot_sector;
10694*4882a593Smuzhiyun for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
10695*4882a593Smuzhiyun addr += SF_PAGE_SIZE;
10696*4882a593Smuzhiyun boot_data += SF_PAGE_SIZE;
10697*4882a593Smuzhiyun ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
10698*4882a593Smuzhiyun false);
10699*4882a593Smuzhiyun if (ret)
10700*4882a593Smuzhiyun goto out;
10701*4882a593Smuzhiyun }
10702*4882a593Smuzhiyun
10703*4882a593Smuzhiyun ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
10704*4882a593Smuzhiyun (const u8 *)header, false);
10705*4882a593Smuzhiyun
10706*4882a593Smuzhiyun out:
10707*4882a593Smuzhiyun if (ret)
10708*4882a593Smuzhiyun dev_err(adap->pdev_dev, "boot image load failed, error %d\n",
10709*4882a593Smuzhiyun ret);
10710*4882a593Smuzhiyun return ret;
10711*4882a593Smuzhiyun }
10712*4882a593Smuzhiyun
10713*4882a593Smuzhiyun /**
10714*4882a593Smuzhiyun * t4_flash_bootcfg_addr - return the address of the flash
10715*4882a593Smuzhiyun * optionrom configuration
10716*4882a593Smuzhiyun * @adapter: the adapter
10717*4882a593Smuzhiyun *
10718*4882a593Smuzhiyun * Return the address within the flash where the OptionROM Configuration
10719*4882a593Smuzhiyun * is stored, or an error if the device FLASH is too small to contain
10720*4882a593Smuzhiyun * a OptionROM Configuration.
10721*4882a593Smuzhiyun */
t4_flash_bootcfg_addr(struct adapter * adapter)10722*4882a593Smuzhiyun static int t4_flash_bootcfg_addr(struct adapter *adapter)
10723*4882a593Smuzhiyun {
10724*4882a593Smuzhiyun /**
10725*4882a593Smuzhiyun * If the device FLASH isn't large enough to hold a Firmware
10726*4882a593Smuzhiyun * Configuration File, return an error.
10727*4882a593Smuzhiyun */
10728*4882a593Smuzhiyun if (adapter->params.sf_size <
10729*4882a593Smuzhiyun FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
10730*4882a593Smuzhiyun return -ENOSPC;
10731*4882a593Smuzhiyun
10732*4882a593Smuzhiyun return FLASH_BOOTCFG_START;
10733*4882a593Smuzhiyun }
10734*4882a593Smuzhiyun
t4_load_bootcfg(struct adapter * adap,const u8 * cfg_data,unsigned int size)10735*4882a593Smuzhiyun int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
10736*4882a593Smuzhiyun {
10737*4882a593Smuzhiyun unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
10738*4882a593Smuzhiyun struct cxgb4_bootcfg_data *header;
10739*4882a593Smuzhiyun unsigned int flash_cfg_start_sec;
10740*4882a593Smuzhiyun unsigned int addr, npad;
10741*4882a593Smuzhiyun int ret, i, n, cfg_addr;
10742*4882a593Smuzhiyun
10743*4882a593Smuzhiyun cfg_addr = t4_flash_bootcfg_addr(adap);
10744*4882a593Smuzhiyun if (cfg_addr < 0)
10745*4882a593Smuzhiyun return cfg_addr;
10746*4882a593Smuzhiyun
10747*4882a593Smuzhiyun addr = cfg_addr;
10748*4882a593Smuzhiyun flash_cfg_start_sec = addr / SF_SEC_SIZE;
10749*4882a593Smuzhiyun
10750*4882a593Smuzhiyun if (size > FLASH_BOOTCFG_MAX_SIZE) {
10751*4882a593Smuzhiyun dev_err(adap->pdev_dev, "bootcfg file too large, max is %u bytes\n",
10752*4882a593Smuzhiyun FLASH_BOOTCFG_MAX_SIZE);
10753*4882a593Smuzhiyun return -EFBIG;
10754*4882a593Smuzhiyun }
10755*4882a593Smuzhiyun
10756*4882a593Smuzhiyun header = (struct cxgb4_bootcfg_data *)cfg_data;
10757*4882a593Smuzhiyun if (le16_to_cpu(header->signature) != BOOT_CFG_SIG) {
10758*4882a593Smuzhiyun dev_err(adap->pdev_dev, "Wrong bootcfg signature\n");
10759*4882a593Smuzhiyun ret = -EINVAL;
10760*4882a593Smuzhiyun goto out;
10761*4882a593Smuzhiyun }
10762*4882a593Smuzhiyun
10763*4882a593Smuzhiyun i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,
10764*4882a593Smuzhiyun sf_sec_size);
10765*4882a593Smuzhiyun ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
10766*4882a593Smuzhiyun flash_cfg_start_sec + i - 1);
10767*4882a593Smuzhiyun
10768*4882a593Smuzhiyun /**
10769*4882a593Smuzhiyun * If size == 0 then we're simply erasing the FLASH sectors associated
10770*4882a593Smuzhiyun * with the on-adapter OptionROM Configuration File.
10771*4882a593Smuzhiyun */
10772*4882a593Smuzhiyun if (ret || size == 0)
10773*4882a593Smuzhiyun goto out;
10774*4882a593Smuzhiyun
10775*4882a593Smuzhiyun /* this will write to the flash up to SF_PAGE_SIZE at a time */
10776*4882a593Smuzhiyun for (i = 0; i < size; i += SF_PAGE_SIZE) {
10777*4882a593Smuzhiyun n = min_t(u32, size - i, SF_PAGE_SIZE);
10778*4882a593Smuzhiyun
10779*4882a593Smuzhiyun ret = t4_write_flash(adap, addr, n, cfg_data, false);
10780*4882a593Smuzhiyun if (ret)
10781*4882a593Smuzhiyun goto out;
10782*4882a593Smuzhiyun
10783*4882a593Smuzhiyun addr += SF_PAGE_SIZE;
10784*4882a593Smuzhiyun cfg_data += SF_PAGE_SIZE;
10785*4882a593Smuzhiyun }
10786*4882a593Smuzhiyun
10787*4882a593Smuzhiyun npad = ((size + 4 - 1) & ~3) - size;
10788*4882a593Smuzhiyun for (i = 0; i < npad; i++) {
10789*4882a593Smuzhiyun u8 data = 0;
10790*4882a593Smuzhiyun
10791*4882a593Smuzhiyun ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
10792*4882a593Smuzhiyun false);
10793*4882a593Smuzhiyun if (ret)
10794*4882a593Smuzhiyun goto out;
10795*4882a593Smuzhiyun }
10796*4882a593Smuzhiyun
10797*4882a593Smuzhiyun out:
10798*4882a593Smuzhiyun if (ret)
10799*4882a593Smuzhiyun dev_err(adap->pdev_dev, "boot config data %s failed %d\n",
10800*4882a593Smuzhiyun (size == 0 ? "clear" : "download"), ret);
10801*4882a593Smuzhiyun return ret;
10802*4882a593Smuzhiyun }
10803