1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Copyright(c) 2015, 2016 Intel Corporation.
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * This file is provided under a dual BSD/GPLv2 license. When using or
5*4882a593Smuzhiyun * redistributing this file, you may do so under either license.
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * GPL LICENSE SUMMARY
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun * This program is free software; you can redistribute it and/or modify
10*4882a593Smuzhiyun * it under the terms of version 2 of the GNU General Public License as
11*4882a593Smuzhiyun * published by the Free Software Foundation.
12*4882a593Smuzhiyun *
13*4882a593Smuzhiyun * This program is distributed in the hope that it will be useful, but
14*4882a593Smuzhiyun * WITHOUT ANY WARRANTY; without even the implied warranty of
15*4882a593Smuzhiyun * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16*4882a593Smuzhiyun * General Public License for more details.
17*4882a593Smuzhiyun *
18*4882a593Smuzhiyun * BSD LICENSE
19*4882a593Smuzhiyun *
20*4882a593Smuzhiyun * Redistribution and use in source and binary forms, with or without
21*4882a593Smuzhiyun * modification, are permitted provided that the following conditions
22*4882a593Smuzhiyun * are met:
23*4882a593Smuzhiyun *
24*4882a593Smuzhiyun * - Redistributions of source code must retain the above copyright
25*4882a593Smuzhiyun * notice, this list of conditions and the following disclaimer.
26*4882a593Smuzhiyun * - Redistributions in binary form must reproduce the above copyright
27*4882a593Smuzhiyun * notice, this list of conditions and the following disclaimer in
28*4882a593Smuzhiyun * the documentation and/or other materials provided with the
29*4882a593Smuzhiyun * distribution.
30*4882a593Smuzhiyun * - Neither the name of Intel Corporation nor the names of its
31*4882a593Smuzhiyun * contributors may be used to endorse or promote products derived
32*4882a593Smuzhiyun * from this software without specific prior written permission.
33*4882a593Smuzhiyun *
34*4882a593Smuzhiyun * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35*4882a593Smuzhiyun * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36*4882a593Smuzhiyun * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37*4882a593Smuzhiyun * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38*4882a593Smuzhiyun * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39*4882a593Smuzhiyun * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40*4882a593Smuzhiyun * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41*4882a593Smuzhiyun * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42*4882a593Smuzhiyun * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43*4882a593Smuzhiyun * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44*4882a593Smuzhiyun * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45*4882a593Smuzhiyun *
46*4882a593Smuzhiyun */
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun #include <linux/firmware.h>
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun #include "hfi.h"
51*4882a593Smuzhiyun #include "efivar.h"
52*4882a593Smuzhiyun #include "eprom.h"
53*4882a593Smuzhiyun
54*4882a593Smuzhiyun #define DEFAULT_PLATFORM_CONFIG_NAME "hfi1_platform.dat"
55*4882a593Smuzhiyun
/*
 * Validate the configuration-bitmap checksum held in the ASIC scratch
 * registers.
 *
 * Returns 1 if the bitmap checksum is valid, 0 otherwise (uninitialized
 * bitmap or corruption).
 */
static int validate_scratch_checksum(struct hfi1_devdata *dd)
{
	u64 checksum = 0, temp_scratch = 0;
	int i, j, version;

	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
	version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;

	/* Prevent power on default of all zeroes from passing checksum */
	if (!version) {
		dd_dev_err(dd, "%s: Config bitmap uninitialized\n", __func__);
		dd_dev_err(dd,
			   "%s: Please update your BIOS to support active channels\n",
			   __func__);
		return 0;
	}

	/*
	 * ASIC scratch 0 only contains the checksum and bitmap version as
	 * fields of interest, both of which are handled separately from the
	 * loop below, so skip it
	 */
	checksum += version;
	for (i = 1; i < ASIC_NUM_SCRATCH; i++) {
		temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
		/* Sum each 64-bit scratch register as four 16-bit words */
		for (j = sizeof(u64); j != 0; j -= 2) {
			checksum += (temp_scratch & 0xFFFF);
			temp_scratch >>= 16;
		}
	}

	/* Fold carry bits back into the low 16 bits (ones'-complement sum) */
	while (checksum >> 16)
		checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16);

	/* Extract the stored checksum field from scratch register 0 */
	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
	temp_scratch &= CHECKSUM_SMASK;
	temp_scratch >>= CHECKSUM_SHIFT;

	/* Computed sum plus stored checksum must total 0xFFFF when intact */
	if (checksum + temp_scratch == 0xFFFF)
		return 1;

	dd_dev_err(dd, "%s: Configuration bitmap corrupted\n", __func__);
	return 0;
}
100*4882a593Smuzhiyun
/*
 * Populate the per-port platform tuning fields (port type, attenuation
 * values, TX/RX presets, max QSFP power class) from the ASIC scratch
 * registers.  Used on integrated devices, where the BIOS writes the
 * configuration into scratch space instead of an EPROM config blob.
 */
static void save_platform_config_fields(struct hfi1_devdata *dd)
{
	struct hfi1_pportdata *ppd = dd->pport;
	u64 temp_scratch = 0, temp_dest = 0;

	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);

	/* Each field below picks the port 0 or port 1 variant via hfi1_id */
	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
		     PORT0_PORT_TYPE_SMASK);
	ppd->port_type = temp_dest >>
			 (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
			  PORT0_PORT_TYPE_SHIFT);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
		     PORT0_LOCAL_ATTEN_SMASK);
	ppd->local_atten = temp_dest >>
			   (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
			    PORT0_LOCAL_ATTEN_SHIFT);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
		     PORT0_REMOTE_ATTEN_SMASK);
	ppd->remote_atten = temp_dest >>
			    (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
			     PORT0_REMOTE_ATTEN_SHIFT);

	temp_dest = temp_scratch &
		    (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
		     PORT0_DEFAULT_ATTEN_SMASK);
	ppd->default_atten = temp_dest >>
			     (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
			      PORT0_DEFAULT_ATTEN_SHIFT);

	/* Preset and power-class fields live in a per-port scratch register */
	temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
				ASIC_CFG_SCRATCH_2);

	ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT;
	ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT;
	ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT;

	ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
			       QSFP_MAX_POWER_SHIFT;

	/* Flag that config came from scratch registers, not a config blob */
	ppd->config_from_scratch = true;
}
148*4882a593Smuzhiyun
get_platform_config(struct hfi1_devdata * dd)149*4882a593Smuzhiyun void get_platform_config(struct hfi1_devdata *dd)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun int ret = 0;
152*4882a593Smuzhiyun u8 *temp_platform_config = NULL;
153*4882a593Smuzhiyun u32 esize;
154*4882a593Smuzhiyun const struct firmware *platform_config_file = NULL;
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun if (is_integrated(dd)) {
157*4882a593Smuzhiyun if (validate_scratch_checksum(dd)) {
158*4882a593Smuzhiyun save_platform_config_fields(dd);
159*4882a593Smuzhiyun return;
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun } else {
162*4882a593Smuzhiyun ret = eprom_read_platform_config(dd,
163*4882a593Smuzhiyun (void **)&temp_platform_config,
164*4882a593Smuzhiyun &esize);
165*4882a593Smuzhiyun if (!ret) {
166*4882a593Smuzhiyun /* success */
167*4882a593Smuzhiyun dd->platform_config.data = temp_platform_config;
168*4882a593Smuzhiyun dd->platform_config.size = esize;
169*4882a593Smuzhiyun return;
170*4882a593Smuzhiyun }
171*4882a593Smuzhiyun }
172*4882a593Smuzhiyun dd_dev_err(dd,
173*4882a593Smuzhiyun "%s: Failed to get platform config, falling back to sub-optimal default file\n",
174*4882a593Smuzhiyun __func__);
175*4882a593Smuzhiyun
176*4882a593Smuzhiyun ret = request_firmware(&platform_config_file,
177*4882a593Smuzhiyun DEFAULT_PLATFORM_CONFIG_NAME,
178*4882a593Smuzhiyun &dd->pcidev->dev);
179*4882a593Smuzhiyun if (ret) {
180*4882a593Smuzhiyun dd_dev_err(dd,
181*4882a593Smuzhiyun "%s: No default platform config file found\n",
182*4882a593Smuzhiyun __func__);
183*4882a593Smuzhiyun return;
184*4882a593Smuzhiyun }
185*4882a593Smuzhiyun
186*4882a593Smuzhiyun /*
187*4882a593Smuzhiyun * Allocate separate memory block to store data and free firmware
188*4882a593Smuzhiyun * structure. This allows free_platform_config to treat EPROM and
189*4882a593Smuzhiyun * fallback configs in the same manner.
190*4882a593Smuzhiyun */
191*4882a593Smuzhiyun dd->platform_config.data = kmemdup(platform_config_file->data,
192*4882a593Smuzhiyun platform_config_file->size,
193*4882a593Smuzhiyun GFP_KERNEL);
194*4882a593Smuzhiyun dd->platform_config.size = platform_config_file->size;
195*4882a593Smuzhiyun release_firmware(platform_config_file);
196*4882a593Smuzhiyun }
197*4882a593Smuzhiyun
free_platform_config(struct hfi1_devdata * dd)198*4882a593Smuzhiyun void free_platform_config(struct hfi1_devdata *dd)
199*4882a593Smuzhiyun {
200*4882a593Smuzhiyun /* Release memory allocated for eprom or fallback file read. */
201*4882a593Smuzhiyun kfree(dd->platform_config.data);
202*4882a593Smuzhiyun dd->platform_config.data = NULL;
203*4882a593Smuzhiyun }
204*4882a593Smuzhiyun
get_port_type(struct hfi1_pportdata * ppd)205*4882a593Smuzhiyun void get_port_type(struct hfi1_pportdata *ppd)
206*4882a593Smuzhiyun {
207*4882a593Smuzhiyun int ret;
208*4882a593Smuzhiyun u32 temp;
209*4882a593Smuzhiyun
210*4882a593Smuzhiyun ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
211*4882a593Smuzhiyun PORT_TABLE_PORT_TYPE, &temp,
212*4882a593Smuzhiyun 4);
213*4882a593Smuzhiyun if (ret) {
214*4882a593Smuzhiyun ppd->port_type = PORT_TYPE_UNKNOWN;
215*4882a593Smuzhiyun return;
216*4882a593Smuzhiyun }
217*4882a593Smuzhiyun ppd->port_type = temp;
218*4882a593Smuzhiyun }
219*4882a593Smuzhiyun
set_qsfp_tx(struct hfi1_pportdata * ppd,int on)220*4882a593Smuzhiyun int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
221*4882a593Smuzhiyun {
222*4882a593Smuzhiyun u8 tx_ctrl_byte = on ? 0x0 : 0xF;
223*4882a593Smuzhiyun int ret = 0;
224*4882a593Smuzhiyun
225*4882a593Smuzhiyun ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
226*4882a593Smuzhiyun &tx_ctrl_byte, 1);
227*4882a593Smuzhiyun /* we expected 1, so consider 0 an error */
228*4882a593Smuzhiyun if (ret == 0)
229*4882a593Smuzhiyun ret = -EIO;
230*4882a593Smuzhiyun else if (ret == 1)
231*4882a593Smuzhiyun ret = 0;
232*4882a593Smuzhiyun return ret;
233*4882a593Smuzhiyun }
234*4882a593Smuzhiyun
qual_power(struct hfi1_pportdata * ppd)235*4882a593Smuzhiyun static int qual_power(struct hfi1_pportdata *ppd)
236*4882a593Smuzhiyun {
237*4882a593Smuzhiyun u32 cable_power_class = 0, power_class_max = 0;
238*4882a593Smuzhiyun u8 *cache = ppd->qsfp_info.cache;
239*4882a593Smuzhiyun int ret = 0;
240*4882a593Smuzhiyun
241*4882a593Smuzhiyun ret = get_platform_config_field(
242*4882a593Smuzhiyun ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
243*4882a593Smuzhiyun SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
244*4882a593Smuzhiyun if (ret)
245*4882a593Smuzhiyun return ret;
246*4882a593Smuzhiyun
247*4882a593Smuzhiyun cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
248*4882a593Smuzhiyun
249*4882a593Smuzhiyun if (cable_power_class > power_class_max)
250*4882a593Smuzhiyun ppd->offline_disabled_reason =
251*4882a593Smuzhiyun HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);
252*4882a593Smuzhiyun
253*4882a593Smuzhiyun if (ppd->offline_disabled_reason ==
254*4882a593Smuzhiyun HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
255*4882a593Smuzhiyun dd_dev_err(
256*4882a593Smuzhiyun ppd->dd,
257*4882a593Smuzhiyun "%s: Port disabled due to system power restrictions\n",
258*4882a593Smuzhiyun __func__);
259*4882a593Smuzhiyun ret = -EPERM;
260*4882a593Smuzhiyun }
261*4882a593Smuzhiyun return ret;
262*4882a593Smuzhiyun }
263*4882a593Smuzhiyun
/*
 * Verify that the cable's advertised nominal bit rate can carry the
 * enabled link speeds; disable the port if it cannot.
 *
 * Return: 0 if acceptable, -EPERM if the cable fails the check.
 */
static int qual_bitrate(struct hfi1_pportdata *ppd)
{
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	/* 25G needs nominal rate >= 0x64 (100) units of 250 Mbps = 25 Gbps */
	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
	    cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);

	/* 12.5G needs >= 0x7D (125) units of 100 Mbps = 12.5 Gbps */
	if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
	    cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);

	if (ppd->offline_disabled_reason ==
	    HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
		dd_dev_err(
			ppd->dd,
			"%s: Cable failed bitrate check, disabling port\n",
			__func__);
		return -EPERM;
	}
	return 0;
}
289*4882a593Smuzhiyun
/*
 * Take the QSFP module out of low-power mode when its power class
 * requires it, via the module's power control byte.
 *
 * Return: 0 on success, -EIO if a control-byte write fails.
 */
static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
{
	u8 cable_power_class = 0, power_ctrl_byte = 0;
	u8 *cache = ppd->qsfp_info.cache;
	int ret;

	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	/* Class 1 modules already operate at full power; nothing to do */
	if (cable_power_class > QSFP_POWER_CLASS_1) {
		power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];

		/* Set power override (bit 0), clear low-power request (bit 1) */
		power_ctrl_byte |= 1;
		power_ctrl_byte &= ~(0x2);

		ret = qsfp_write(ppd, ppd->dd->hfi1_id,
				 QSFP_PWR_CTRL_BYTE_OFFS,
				 &power_ctrl_byte, 1);
		/* qsfp_write returns the byte count; expect exactly 1 */
		if (ret != 1)
			return -EIO;

		/* Classes above 4 also need the high-power enable bit */
		if (cable_power_class > QSFP_POWER_CLASS_4) {
			power_ctrl_byte |= (1 << 2);
			ret = qsfp_write(ppd, ppd->dd->hfi1_id,
					 QSFP_PWR_CTRL_BYTE_OFFS,
					 &power_ctrl_byte, 1);
			if (ret != 1)
				return -EIO;
		}

		/* SFF 8679 rev 1.7 LPMode Deassert time */
		msleep(300);
	}
	return 0;
}
324*4882a593Smuzhiyun
/*
 * Decide the module's RX CDR (clock/data recovery) state from the
 * platform configuration and fold it into the low nibble of
 * *cdr_ctrl_byte.  Does not write to the module itself.
 */
static void apply_rx_cdr(struct hfi1_pportdata *ppd,
			 u32 rx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 rx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	/* Nothing to do unless an RX CDR exists and its bypass is controllable */
	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
		return;

	/* RX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn RX CDR on */
		*cdr_ctrl_byte |= 0xF;
		return;
	}

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
		&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
		&rx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	rx_preset = (rx_preset | (rx_preset << 1) |
		     (rx_preset << 2) | (rx_preset << 3));

	if (rx_preset) {
		*cdr_ctrl_byte |= rx_preset;
	} else {
		/* rx_preset == 0 here, so this clears the entire byte... */
		*cdr_ctrl_byte &= rx_preset;
		/* Preserve current TX CDR status */
		*cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
	}
}
375*4882a593Smuzhiyun
/*
 * Decide the module's TX CDR (clock/data recovery) state from the
 * platform configuration and fold it into the high nibble of
 * *cdr_ctrl_byte.  Does not write to the module itself.
 */
static void apply_tx_cdr(struct hfi1_pportdata *ppd,
			 u32 tx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 tx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	/* Nothing to do unless a TX CDR exists and its bypass is controllable */
	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
		return;

	/* TX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn TX CDR on */
		*cdr_ctrl_byte |= 0xF0;
		return;
	}

	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);

	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	tx_preset = (tx_preset | (tx_preset << 1) |
		     (tx_preset << 2) | (tx_preset << 3));

	if (tx_preset)
		*cdr_ctrl_byte |= (tx_preset << 4);
	else
		/* tx_preset == 0: clear the TX nibble, keep the RX nibble */
		/* Preserve current/determined RX CDR status */
		*cdr_ctrl_byte &= ((tx_preset << 4) | 0xF);
}
425*4882a593Smuzhiyun
/*
 * Compute and program the module's combined CDR control byte: the RX
 * helper sets the low nibble, the TX helper the high nibble, then one
 * write pushes the result to the module.
 */
static void apply_cdr_settings(
	struct hfi1_pportdata *ppd, u32 rx_preset_index,
	u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;
	u8 ctrl = cache[QSFP_CDR_CTRL_BYTE_OFFS];

	apply_rx_cdr(ppd, rx_preset_index, &ctrl);
	apply_tx_cdr(ppd, tx_preset_index, &ctrl);

	qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
		   &ctrl, 1);
}
440*4882a593Smuzhiyun
/*
 * Disable the module's adaptive TX equalization, if the module
 * advertises having it, so the fixed programmable EQ applies instead.
 */
static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
{
	u8 *cache = ppd->qsfp_info.cache;
	u8 tx_eq;

	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
		return;
	/* Disable adaptive TX EQ if present */
	/*
	 * NOTE(review): cache indexes 128-byte pages while qsfp_write
	 * addresses 256-byte pages; both refer to byte 241 of upper page
	 * 03 — confirm against the qsfp cache layout.
	 */
	tx_eq = cache[(128 * 3) + 241];
	tx_eq &= 0xF0;		/* clear the low nibble (adaptive enables) */
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
}
453*4882a593Smuzhiyun
/*
 * Apply the programmable TX equalization value from the platform
 * configuration to all four lanes, clamped to what the module
 * advertises as its maximum supported TX EQ.
 */
static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;
	u32 tx_preset;
	u8 tx_eq;

	/* Module must advertise programmable TX EQ support */
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
		return;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
		&tx_preset, 4);
	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_EQ_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
		&tx_preset, 4);

	/*
	 * cache[(128 * 3) + 224] == cache[608] is the module's EQ
	 * capability byte; its high nibble is the max supported TX EQ.
	 * Clamp the requested value to that maximum.
	 */
	if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX EQ %x unsupported\n",
			__func__, tx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying EQ %x\n",
			__func__, cache[608] & 0xF0);

		tx_preset = (cache[608] & 0xF0) >> 4;
	}

	/* Duplicate the nibble so one byte covers a lane pair each */
	tx_eq = tx_preset | (tx_preset << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
}
497*4882a593Smuzhiyun
/*
 * Apply the RX emphasis value from the platform configuration to all
 * four lanes, clamped to what the module advertises as its maximum
 * supported RX emphasis.
 */
static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
{
	u32 rx_preset;
	u8 rx_eq, *cache = ppd->qsfp_info.cache;

	/* Module must advertise RX emphasis control support */
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
		return;
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
		&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_EMP_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
		&rx_preset, 4);

	/*
	 * cache[(128 * 3) + 224] == cache[608] is the module's EQ
	 * capability byte; its low nibble is the max supported RX emphasis.
	 * Clamp the requested value to that maximum.
	 */
	if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: Requested RX EMP %x\n",
			__func__, rx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying supported EMP %x\n",
			__func__, cache[608] & 0xF);

		rx_preset = cache[608] & 0xF;
	}

	/* Duplicate the nibble so one byte covers a lane pair each */
	rx_eq = rx_preset | (rx_preset << 4);

	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
}
541*4882a593Smuzhiyun
/*
 * Apply all equalization settings (adaptive TX EQ disable, programmed
 * TX EQ, RX emphasis) for the given preset indexes.  All of these live
 * in upper page 03, so bail out when that page is absent.
 */
static void apply_eq_settings(struct hfi1_pportdata *ppd,
			      u32 rx_preset_index, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;

	/* no point going on w/o a page 3 */
	if (cache[2] & 4) {
		dd_dev_info(ppd->dd,
			    "%s: Upper page 03 not present\n",
			    __func__);
		return;
	}

	apply_tx_eq_auto(ppd);
	apply_tx_eq_prog(ppd, tx_preset_index);
	apply_rx_eq_emp(ppd, rx_preset_index);
}
561*4882a593Smuzhiyun
/*
 * Apply the RX amplitude code from the platform configuration, limited
 * to amplitude codes the module advertises as supported.
 *
 * tx_preset_index is unused in this function; it appears to be kept
 * for signature symmetry with apply_eq_settings().
 */
static void apply_rx_amplitude_settings(
	struct hfi1_pportdata *ppd, u32 rx_preset_index,
	u32 tx_preset_index)
{
	u32 rx_preset;
	u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;

	/* no point going on w/o a page 3 */
	if (cache[2] & 4) {
		dd_dev_info(ppd->dd,
			    "%s: Upper page 03 not present\n",
			    __func__);
		return;
	}
	/*
	 * NOTE(review): this tests the module's amplitude-control support
	 * bit but reuses the RX_AMP_APPLY wording of the config check
	 * below — confirm whether a distinct message is wanted.
	 */
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
		dd_dev_info(ppd->dd,
			    "%s: RX_AMP_APPLY is set to disabled\n",
			    __func__);
		return;
	}

	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
				  &rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(ppd->dd,
			    "%s: RX_AMP_APPLY is set to disabled\n",
			    __func__);
		return;
	}
	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP,
				  &rx_preset, 4);

	dd_dev_info(ppd->dd,
		    "%s: Requested RX AMP %x\n",
		    __func__,
		    rx_preset);

	/*
	 * cache[(128 * 3) + 225] is a bitmask of supported amplitude
	 * codes.  Walk codes 0-3 keeping the last supported one, stopping
	 * early if the requested code itself is supported.
	 */
	for (i = 0; i < 4; i++) {
		if (cache[(128 * 3) + 225] & (1 << i)) {
			preferred = i;
			if (preferred == rx_preset)
				break;
		}
	}

	/*
	 * Verify that preferred RX amplitude is not just a
	 * fall through of the default
	 */
	if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
		dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
		return;
	}

	dd_dev_info(ppd->dd,
		    "%s: Applying RX AMP %x\n", __func__, preferred);

	/* Duplicate the nibble so one byte covers a lane pair each */
	rx_amp = preferred | (preferred << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
}
630*4882a593Smuzhiyun
631*4882a593Smuzhiyun #define OPA_INVALID_INDEX 0xFFF
632*4882a593Smuzhiyun
/*
 * Write the same 8051 configuration word to each of the four lanes,
 * logging (but not aborting on) any per-lane failure.  @message names
 * the operation for the error log.
 */
static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
			   u32 config_data, const char *message)
{
	int ret;
	u8 lane;

	for (lane = 0; lane < 4; lane++) {
		ret = load_8051_config(ppd->dd, field_id, lane, config_data);
		if (ret != HCMD_SUCCESS)
			dd_dev_err(ppd->dd,
				   "%s: %s for lane %u failed\n",
				   message, __func__, lane);
	}
}
649*4882a593Smuzhiyun
650*4882a593Smuzhiyun /*
651*4882a593Smuzhiyun * Return a special SerDes setting for low power AOC cables. The power class
652*4882a593Smuzhiyun * threshold and setting being used were all found by empirical testing.
653*4882a593Smuzhiyun *
654*4882a593Smuzhiyun * Summary of the logic:
655*4882a593Smuzhiyun *
656*4882a593Smuzhiyun * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4)
657*4882a593Smuzhiyun * return 0xe
658*4882a593Smuzhiyun * return 0; // leave at default
659*4882a593Smuzhiyun */
aoc_low_power_setting(struct hfi1_pportdata * ppd)660*4882a593Smuzhiyun static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd)
661*4882a593Smuzhiyun {
662*4882a593Smuzhiyun u8 *cache = ppd->qsfp_info.cache;
663*4882a593Smuzhiyun int power_class;
664*4882a593Smuzhiyun
665*4882a593Smuzhiyun /* QSFP only */
666*4882a593Smuzhiyun if (ppd->port_type != PORT_TYPE_QSFP)
667*4882a593Smuzhiyun return 0; /* leave at default */
668*4882a593Smuzhiyun
669*4882a593Smuzhiyun /* active optical cables only */
670*4882a593Smuzhiyun switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
671*4882a593Smuzhiyun case 0x0 ... 0x9: fallthrough;
672*4882a593Smuzhiyun case 0xC: fallthrough;
673*4882a593Smuzhiyun case 0xE:
674*4882a593Smuzhiyun /* active AOC */
675*4882a593Smuzhiyun power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
676*4882a593Smuzhiyun if (power_class < QSFP_POWER_CLASS_4)
677*4882a593Smuzhiyun return 0xe;
678*4882a593Smuzhiyun }
679*4882a593Smuzhiyun return 0; /* leave at default */
680*4882a593Smuzhiyun }
681*4882a593Smuzhiyun
/*
 * Push the chosen tuning parameters to the 8051 firmware.
 *
 * @tx_preset_index: index into the platform Tx preset table, or
 *	OPA_INVALID_INDEX when no preset applies.
 * @tuning_method: OPA_*_TUNING value describing how the attenuation
 *	was derived (passive/active/unknown).
 * @total_atten: total channel attenuation, applied identically to the
 *	Tx and Rx channel-loss fields.
 * @limiting_active: non-zero when a limiting-active cable is present;
 *	used only to decide whether a missing Tx preset is an error.
 *
 * Errors from the 8051 loads are logged but not propagated.
 */
static void apply_tunings(
	struct hfi1_pportdata *ppd, u32 tx_preset_index,
	u8 tuning_method, u32 total_atten, u8 limiting_active)
{
	int ret = 0;
	u32 config_data = 0, tx_preset = 0;
	u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
	u8 *cache = ppd->qsfp_info.cache;

	/* Pass tuning method to 8051 */
	read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			 &config_data);
	/* replace only the tuning-method byte, preserving other fields */
	config_data &= ~(0xff << TUNING_METHOD_SHIFT);
	config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
	ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			       config_data);
	if (ret != HCMD_SUCCESS)
		dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
			   __func__);

	/* Set same channel loss for both TX and RX */
	config_data = 0 | (total_atten << 16) | (total_atten << 24);
	apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
		       "Setting channel loss");

	/* Inform 8051 of cable capabilities */
	if (ppd->qsfp_info.cache_valid) {
		/*
		 * Pack selected QSFP power and equalization capability
		 * bits into the one-byte external device config field.
		 */
		external_device_config =
			((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
			((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
			((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
			(cache[QSFP_EQ_INFO_OFFS] & 0x4);
		/*
		 * NOTE(review): this read result is not checked; on
		 * failure config_data keeps its previous value before
		 * being modified below — confirm this is intentional.
		 */
		ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, &config_data);
		/* Clear, then set the external device config field */
		config_data &= ~(u32)0xFF;
		config_data |= external_device_config;
		ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, config_data);
		if (ret != HCMD_SUCCESS)
			dd_dev_err(ppd->dd,
				   "%s: Failed set ext device config params\n",
				   __func__);
	}

	if (tx_preset_index == OPA_INVALID_INDEX) {
		/* only QSFP limiting-active ports are expected to have one */
		if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
			dd_dev_err(ppd->dd, "%s: Invalid Tx preset index\n",
				   __func__);
		return;
	}

	/* Following for limiting active channels only */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
	precur = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
	attn = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
	postcur = tx_preset;

	/*
	 * NOTES:
	 * o The aoc_low_power_setting is applied to all lanes even
	 *   though only lane 0's value is examined by the firmware.
	 * o A lingering low power setting after a cable swap does
	 *   not occur. On cable unplug the 8051 is reset and
	 *   restarted on cable insert. This resets all settings to
	 *   their default, erasing any previous low power setting.
	 */
	config_data = precur | (attn << 8) | (postcur << 16) |
		      (aoc_low_power_setting(ppd) << 24);

	apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
		       "Applying TX settings");
}
765*4882a593Smuzhiyun
/*
 * Tune an active (optical/AOC) QSFP cable.
 *
 * Qualifies the module's power and bit rate, resets the module when a
 * previous tuning pass touched it, raises it to high power, looks up
 * the platform Tx/Rx preset indices, and applies CDR/EQ/Rx-amplitude
 * settings before re-enabling the QSFP Tx.
 *
 * Must be holding the QSFP i2c resource.
 *
 * On failure the affected preset output is set to OPA_INVALID_INDEX
 * and the error is returned; 0 on success.
 */
static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
			    u32 *ptr_rx_preset, u32 *ptr_total_atten)
{
	int ret;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	/* active cables limit the signal; record it for apply_tunings() */
	ppd->qsfp_info.limiting_active = 1;

	/* disable the module's Tx while we reconfigure it */
	ret = set_qsfp_tx(ppd, 0);
	if (ret)
		return ret;

	ret = qual_power(ppd);
	if (ret)
		return ret;

	ret = qual_bitrate(ppd);
	if (ret)
		return ret;

	/*
	 * We'll change the QSFP memory contents from here on out, thus we set a
	 * flag here to remind ourselves to reset the QSFP module. This prevents
	 * reuse of stale settings established in our previous pass through.
	 */
	if (ppd->qsfp_info.reset_needed) {
		ret = reset_qsfp(ppd);
		if (ret)
			return ret;
		refresh_qsfp_cache(ppd, &ppd->qsfp_info);
	} else {
		ppd->qsfp_info.reset_needed = 1;
	}

	ret = set_qsfp_high_power(ppd);
	if (ret)
		return ret;

	/* pick the Tx preset based on the cable's Tx equalization bit */
	if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	} else {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	}

	ret = get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
		PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
	if (ret) {
		*ptr_rx_preset = OPA_INVALID_INDEX;
		return ret;
	}

	/*
	 * Local attenuation by speed; errors are ignored here, leaving
	 * the caller's pre-zeroed *ptr_total_atten in place.
	 */
	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
	else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);

	apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	/* re-enable the module's Tx now that tuning is applied */
	ret = set_qsfp_tx(ppd, 1);

	return ret;
}
855*4882a593Smuzhiyun
/*
 * Select tuning parameters for the cable plugged into this QSFP port.
 *
 * The cable-technology nibble from the cached QSFP memory chooses the
 * path: 0xA-0xB is passive copper (attenuation summed from platform,
 * cable, and remote values), 0x0-0x9/0xC/0xE is an active cable
 * (handled by tune_active_qsfp()), anything else is unsupported and
 * only warned about.
 *
 * On success *ptr_tuning_method, *ptr_total_atten and (for active
 * cables) the preset indices are filled in.  Returns 0 on success or
 * the first failing helper's error.
 */
static int tune_qsfp(struct hfi1_pportdata *ppd,
		     u32 *ptr_tx_preset, u32 *ptr_rx_preset,
		     u8 *ptr_tuning_method, u32 *ptr_total_atten)
{
	u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	int ret = 0;
	u8 *cache = ppd->qsfp_info.cache;

	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
	case 0xA ... 0xB:
		/* passive copper */
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G,
			&platform_atten, 4);
		if (ret)
			return ret;

		/*
		 * NOTE(review): 25G reads the 12G-offset byte and 12.5G
		 * the 7G-offset byte — presumably the offsets are named
		 * for the measurement frequency, not the link speed;
		 * confirm against the QSFP memory map.
		 */
		if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
			cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
		else if ((lss & OPA_LINK_SPEED_12_5G) &&
			 (lse & OPA_LINK_SPEED_12_5G))
			cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];

		/* Fallback to configured attenuation if cable memory is bad */
		if (cable_atten == 0 || cable_atten > 36) {
			ret = get_platform_config_field(
				ppd->dd,
				PLATFORM_CONFIG_SYSTEM_TABLE, 0,
				SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
				&cable_atten, 4);
			if (ret)
				return ret;
		}

		ret = get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
		if (ret)
			return ret;

		*ptr_total_atten = platform_atten + cable_atten + remote_atten;

		*ptr_tuning_method = OPA_PASSIVE_TUNING;
		break;
	case 0x0 ... 0x9: fallthrough;
	case 0xC: fallthrough;
	case 0xE:
		/* active cable (optical / AOC) */
		ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
				       ptr_total_atten);
		if (ret)
			return ret;

		*ptr_tuning_method = OPA_ACTIVE_TUNING;
		break;
	case 0xD: fallthrough;
	case 0xF:
	default:
		dd_dev_warn(ppd->dd, "%s: Unknown/unsupported cable\n",
			    __func__);
		break;
	}
	return ret;
}
921*4882a593Smuzhiyun
922*4882a593Smuzhiyun /*
923*4882a593Smuzhiyun * This function communicates its success or failure via ppd->driver_link_ready
924*4882a593Smuzhiyun * Thus, it depends on its association with start_link(...) which checks
925*4882a593Smuzhiyun * driver_link_ready before proceeding with the link negotiation and
926*4882a593Smuzhiyun * initialization process.
927*4882a593Smuzhiyun */
void tune_serdes(struct hfi1_pportdata *ppd)
{
	int ret = 0;
	u32 total_atten = 0;
	u32 remote_atten = 0, platform_atten = 0;
	u32 rx_preset_index, tx_preset_index;
	u8 tuning_method = 0, limiting_active = 0;
	struct hfi1_devdata *dd = ppd->dd;

	rx_preset_index = OPA_INVALID_INDEX;
	tx_preset_index = OPA_INVALID_INDEX;

	/* the link defaults to enabled */
	ppd->link_enabled = 1;
	/* the driver link ready state defaults to not ready */
	ppd->driver_link_ready = 0;
	ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);

	/* Skip the tuning for testing (loopback != none) and simulations */
	if (loopback != LOOPBACK_NONE ||
	    ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
		ppd->driver_link_ready = 1;

		if (qsfp_mod_present(ppd)) {
			/* serialize access to the QSFP i2c chain */
			ret = acquire_chip_resource(ppd->dd,
						    qsfp_resource(ppd->dd),
						    QSFP_WAIT);
			if (ret) {
				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
					   __func__, (int)ppd->dd->hfi1_id);
				goto bail;
			}

			/* cache the module memory even when not tuning */
			refresh_qsfp_cache(ppd, &ppd->qsfp_info);
			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
		}

		return;
	}

	switch (ppd->port_type) {
	case PORT_TYPE_DISCONNECTED:
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
		dd_dev_warn(dd, "%s: Port disconnected, disabling port\n",
			    __func__);
		goto bail;
	case PORT_TYPE_FIXED:
		/* platform_atten, remote_atten pre-zeroed to catch error */
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);

		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);

		total_atten = platform_atten + remote_atten;

		tuning_method = OPA_PASSIVE_TUNING;
		break;
	case PORT_TYPE_VARIABLE:
		/* variable ports tune passively, but only with a module */
		if (qsfp_mod_present(ppd)) {
			/*
			 * platform_atten, remote_atten pre-zeroed to
			 * catch error
			 */
			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_LOCAL_ATTEN_25G,
				&platform_atten, 4);

			get_platform_config_field(
				ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
				PORT_TABLE_REMOTE_ATTEN_25G,
				&remote_atten, 4);

			total_atten = platform_atten + remote_atten;

			tuning_method = OPA_PASSIVE_TUNING;
		} else {
			ppd->offline_disabled_reason =
			     HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
			goto bail;
		}
		break;
	case PORT_TYPE_QSFP:
		if (qsfp_mod_present(ppd)) {
			/* serialize access to the QSFP i2c chain */
			ret = acquire_chip_resource(ppd->dd,
						    qsfp_resource(ppd->dd),
						    QSFP_WAIT);
			if (ret) {
				dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
					   __func__, (int)ppd->dd->hfi1_id);
				goto bail;
			}
			refresh_qsfp_cache(ppd, &ppd->qsfp_info);

			if (ppd->qsfp_info.cache_valid) {
				ret = tune_qsfp(ppd,
						&tx_preset_index,
						&rx_preset_index,
						&tuning_method,
						&total_atten);

				/*
				 * We may have modified the QSFP memory, so
				 * update the cache to reflect the changes
				 */
				refresh_qsfp_cache(ppd, &ppd->qsfp_info);
				limiting_active =
						ppd->qsfp_info.limiting_active;
			} else {
				dd_dev_err(dd,
					   "%s: Reading QSFP memory failed\n",
					   __func__);
				ret = -EINVAL; /* a fail indication */
			}
			/* release before inspecting ret so we never leak */
			release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
			if (ret)
				goto bail;
		} else {
			ppd->offline_disabled_reason =
			   HFI1_ODR_MASK(
				OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
			goto bail;
		}
		break;
	default:
		dd_dev_warn(ppd->dd, "%s: Unknown port type\n", __func__);
		ppd->port_type = PORT_TYPE_UNKNOWN;
		tuning_method = OPA_UNKNOWN_TUNING;
		total_atten = 0;
		limiting_active = 0;
		tx_preset_index = OPA_INVALID_INDEX;
		break;
	}

	/* only push tunings if nothing above disabled the link */
	if (ppd->offline_disabled_reason ==
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
		apply_tunings(ppd, tx_preset_index, tuning_method,
			      total_atten, limiting_active);

	if (!ret)
		ppd->driver_link_ready = 1;

	return;
bail:
	/* failure: start_link() will see driver_link_ready == 0 and stop */
	ppd->driver_link_ready = 0;
}
1078