// SPDX-License-Identifier: GPL-2.0-only
/*
 * Rockchip PCIe APIs for WiFi
 *
 * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
 */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/kernel.h>
9*4882a593Smuzhiyun #include <linux/module.h>
10*4882a593Smuzhiyun #include <linux/pci.h>
11*4882a593Smuzhiyun #include <linux/aspm_ext.h>
12*4882a593Smuzhiyun #include <linux/errno.h>
13*4882a593Smuzhiyun
14*4882a593Smuzhiyun
/*
 * rockchip_pcie_pcie_access_cap() - read or write a dword within a PCI
 * capability structure of @pdev.
 * @pdev:     target PCI device
 * @cap:      capability ID to locate
 * @offset:   byte offset inside the capability structure
 * @is_ext:   true to search extended (PCIe) capabilities, false for legacy
 * @is_write: true to write @writeval, false to read
 * @writeval: value to write when @is_write is true
 *
 * Return: the dword read (on reads) or 0 (on successful writes).
 * Returns (u32)-1 when @pdev is NULL and (u32)-EINVAL when the capability
 * is not present. NOTE: on reads these error codes are indistinguishable
 * from genuine config values of 0xffffffff / 0xffffffea — callers in this
 * file only use the ASPM/L1SS bit fields, so this ambiguity is tolerated.
 */
static u32 rockchip_pcie_pcie_access_cap(struct pci_dev *pdev, int cap, uint offset,
					 bool is_ext, bool is_write, u32 writeval)
{
	int cap_ptr = 0;
	u32 ret = -1;
	u32 readval;

	if (!pdev) {
		/*
		 * pci_err() dereferences pdev (&pdev->dev), which would
		 * crash on the very NULL pointer detected here; use the
		 * device-less pr_err() instead.
		 */
		pr_err("%s: pdev is NULL\n", __func__);
		return ret;
	}

	/* Find Capability offset */
	if (is_ext) {
		/* removing max EXT_CAP_ID check as
		 * linux kernel definition's max value is not updated yet as per spec
		 */
		cap_ptr = pci_find_ext_capability(pdev, cap);
	} else {
		/* removing max PCI_CAP_ID_MAX check as
		 * previous kernel versions dont have this definition
		 */
		cap_ptr = pci_find_capability(pdev, cap);
	}

	/* Return if capability with given ID not found */
	if (cap_ptr == 0) {
		pci_err(pdev, "%s: PCI Cap(0x%02x) not supported.\n",
			__func__, cap);
		return -EINVAL;
	}

	if (is_write) {
		pci_write_config_dword(pdev, (cap_ptr + offset), writeval);
		ret = 0;
	} else {
		pci_read_config_dword(pdev, (cap_ptr + offset), &readval);
		ret = readval;
	}

	return ret;
}
59*4882a593Smuzhiyun
/*
 * rockchip_pcie_bus_aspm_enable_dev() - toggle ASPM on a single port.
 * @device: label used in log messages ("RC" or "EP")
 * @dev:    port whose Link Control register is updated
 * @enable: true to enable L1 ASPM only, false to disable all ASPM states
 *
 * Return: true if the Link Control register was changed, false if the
 * port was already in the requested state.
 */
static bool rockchip_pcie_bus_aspm_enable_dev(char *device, struct pci_dev *dev, bool enable)
{
	u32 linkctrl_before;
	u32 linkctrl_after = 0;
	u8 linkctrl_asm;

	linkctrl_before = rockchip_pcie_pcie_access_cap(dev, PCI_CAP_ID_EXP, PCI_EXP_LNKCTL,
							false, false, 0);
	linkctrl_asm = (linkctrl_before & PCI_EXP_LNKCTL_ASPMC);

	if (enable) {
		if (linkctrl_asm == PCI_EXP_LNKCTL_ASPM_L1) {
			/* Already in the requested state: informational, not an error */
			pci_info(dev, "%s: %s already enabled linkctrl: 0x%x\n",
				 __func__, device, linkctrl_before);
			return false;
		}
		/* Enable only L1 ASPM (bit 1) */
		rockchip_pcie_pcie_access_cap(dev, PCI_CAP_ID_EXP, PCI_EXP_LNKCTL, false,
					      true, (linkctrl_before | PCI_EXP_LNKCTL_ASPM_L1));
	} else {
		if (linkctrl_asm == 0) {
			/* Already in the requested state: informational, not an error */
			pci_info(dev, "%s: %s already disabled linkctrl: 0x%x\n",
				 __func__, device, linkctrl_before);
			return false;
		}
		/* Disable complete ASPM (bit 1 and bit 0) */
		rockchip_pcie_pcie_access_cap(dev, PCI_CAP_ID_EXP, PCI_EXP_LNKCTL, false,
					      true, (linkctrl_before & (~PCI_EXP_LNKCTL_ASPMC)));
	}

	linkctrl_after = rockchip_pcie_pcie_access_cap(dev, PCI_CAP_ID_EXP, PCI_EXP_LNKCTL,
						       false, false, 0);
	/* Successful state change is informational, not an error */
	pci_info(dev, "%s: %s %s, linkctrl_before: 0x%x linkctrl_after: 0x%x\n",
		 __func__, device, (enable ? "ENABLE " : "DISABLE"),
		 linkctrl_before, linkctrl_after);

	return true;
}
98*4882a593Smuzhiyun
/*
 * rockchip_pcie_bus_aspm_enable_rc_ep() - toggle ASPM on both ends of the
 * link, in the spec-mandated order (enable RC before EP, disable EP
 * before RC).
 * @child:  endpoint (EP)
 * @parent: root complex / upstream port (RC)
 * @enable: true to enable L1 ASPM, false to disable all ASPM
 *
 * Return: true if either port's state was actually changed. The original
 * code returned only the result of the second call, so a change on one
 * port could be masked when the other was already in the requested
 * state; the caller relies on this result to decide whether to restore
 * ASPM afterwards.
 */
static bool rockchip_pcie_bus_aspm_enable_rc_ep(struct pci_dev *child, struct pci_dev *parent, bool enable)
{
	bool rc_changed, ep_changed;

	if (enable) {
		/* Enable only L1 ASPM first RC then EP */
		rc_changed = rockchip_pcie_bus_aspm_enable_dev("RC", parent, enable);
		ep_changed = rockchip_pcie_bus_aspm_enable_dev("EP", child, enable);
	} else {
		/* Disable complete ASPM first EP then RC */
		ep_changed = rockchip_pcie_bus_aspm_enable_dev("EP", child, enable);
		rc_changed = rockchip_pcie_bus_aspm_enable_dev("RC", parent, enable);
	}

	return rc_changed || ep_changed;
}
115*4882a593Smuzhiyun
/*
 * pci_clear_and_set_dword() - read-modify-write a config dword: clear the
 * bits in @clear, then set the bits in @set.
 */
static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
				    u32 clear, u32 set)
{
	u32 regval;

	pci_read_config_dword(pdev, pos, &regval);
	regval = (regval & ~clear) | set;
	pci_write_config_dword(pdev, pos, regval);
}
126*4882a593Smuzhiyun
127*4882a593Smuzhiyun /* Convert L1SS T_pwr encoding to usec */
calc_l1ss_pwron(struct pci_dev * pdev,u32 scale,u32 val)128*4882a593Smuzhiyun static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val)
129*4882a593Smuzhiyun {
130*4882a593Smuzhiyun switch (scale) {
131*4882a593Smuzhiyun case 0:
132*4882a593Smuzhiyun return val * 2;
133*4882a593Smuzhiyun case 1:
134*4882a593Smuzhiyun return val * 10;
135*4882a593Smuzhiyun case 2:
136*4882a593Smuzhiyun return val * 100;
137*4882a593Smuzhiyun }
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun return 0;
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun
/*
 * encode_l12_threshold() - encode an LTR_L1.2_THRESHOLD time (in usec)
 * into the scale/value field pair defined by PCIe r3.1, sec 7.33.3 and
 * sec 6.18.
 *
 * Scale s represents units of 2^(5*s) ns; the smallest scale whose range
 * covers the threshold is selected, exactly as the original if-chain did
 * (boundaries 32, 1024, 32768, 1048576, 33554432 ns).
 */
static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
{
	u32 threshold_ns = threshold_us * 1000;
	u32 s;

	for (s = 0; s < 5; s++) {
		if (threshold_ns < (1u << (5 * (s + 1))))
			break;
	}

	*scale = s;
	*value = threshold_ns >> (5 * s);
}
167*4882a593Smuzhiyun
/* Calculate L1.2 PM substate timing parameters */
/*
 * Computes and programs Common_Mode_Restore_Time, T_POWER_ON and
 * LTR_L1.2_THRESHOLD in both ports' L1SS CTL1/CTL2 registers, disabling
 * L1.2 around the update as required by PCIe r5.0, sec 5.5.4 / 7.8.3.3.
 *
 * NOTE(review): assumes parent->l1ss and child->l1ss are nonzero (both
 * ports expose the L1SS extended capability) — confirm callers gate on
 * pcie_aspm_ext_is_rc_ep_l1ss_capable() before invoking this.
 */
static void aspm_calc_l1ss_info(struct pci_dev *child, struct pci_dev *parent)
{
	u32 val1, val2, scale1, scale2;
	u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
	u32 ctl1 = 0, ctl2 = 0;
	u32 pctl1, pctl2, cctl1, cctl2;
	u32 pl1_2_enables, cl1_2_enables;
	u32 parent_l1ss_cap, child_l1ss_cap;

	/* Setup L1 substate */
	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
			      &parent_l1ss_cap);
	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
			      &child_l1ss_cap);

	/* Choose the greater of the two Port Common_Mode_Restore_Times */
	val1 = (parent_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
	val2 = (child_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
	t_common_mode = max(val1, val2);

	/* Choose the greater of the two Port T_POWER_ON times */
	val1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
	scale1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
	val2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
	scale2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;

	/* ctl2 holds the winning T_POWER_ON scale (bits [1:0]) and value (<< 3) */
	if (calc_l1ss_pwron(parent, scale1, val1) >
	    calc_l1ss_pwron(child, scale2, val2)) {
		ctl2 |= scale1 | (val1 << 3);
		t_power_on = calc_l1ss_pwron(parent, scale1, val1);
	} else {
		ctl2 |= scale2 | (val2 << 3);
		t_power_on = calc_l1ss_pwron(child, scale2, val2);
	}

	/* Set LTR_L1.2_THRESHOLD to the time required to transition the
	 * Link from L0 to L1.2 and back to L0 so we enter L1.2 only if
	 * downstream devices report (via LTR) that they can tolerate at
	 * least that much latency.
	 *
	 * Based on PCIe r3.1, sec 5.5.3.3.1, Figures 5-16 and 5-17, and
	 * Table 5-11. T(POWER_OFF) is at most 2us and T(L1.2) is at
	 * least 4us.
	 */
	l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
	encode_l12_threshold(l1_2_threshold, &scale, &value);
	/* ctl1: CM_RESTORE_TIME at bit 8, threshold scale at 29, value at 16 */
	ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;

	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1);
	pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, &pctl2);
	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, &cctl1);
	pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL2, &cctl2);

	/* Nothing to do if both ports already carry the computed values */
	if (ctl1 == pctl1 && ctl1 == cctl1 &&
	    ctl2 == pctl2 && ctl2 == cctl2)
		return;

	/* Disable L1.2 while updating. See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
	pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
	cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;

	if (pl1_2_enables || cl1_2_enables) {
		pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
					PCI_L1SS_CTL1_L1_2_MASK, 0);
		pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
					PCI_L1SS_CTL1_L1_2_MASK, 0);
	}

	/* Program T_POWER_ON times in both ports */
	pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
	pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);

	/* Program Common_Mode_Restore_Time in upstream device */
	pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);

	/* Program LTR_L1.2_THRESHOLD time in both ports */
	pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
				PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
	pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
				PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);

	/* Restore the L1.2 enables that were cleared above */
	if (pl1_2_enables || cl1_2_enables) {
		pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
					pl1_2_enables);
		pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
					cl1_2_enables);
	}
}
260*4882a593Smuzhiyun
/*
 * rockchip_pcie_bus_l1ss_enable_dev() - toggle all L1 substate enables on
 * a single port via the L1SS extended capability.
 * @device: label used in log messages ("RC" or "EP")
 * @dev:    port whose L1SS CTL1 register is updated
 * @enable: true to set PCI_L1SS_CTL1_L1SS_MASK, false to clear it
 */
static void rockchip_pcie_bus_l1ss_enable_dev(char *device, struct pci_dev *dev, bool enable)
{
	u32 l1ssctrl_before;
	u32 l1ssctrl_after = 0;
	u8 l1ss_ep;

	/* L1SS lives in extended capability space */
	l1ssctrl_before = rockchip_pcie_pcie_access_cap(dev, PCI_EXT_CAP_ID_L1SS,
							PCI_L1SS_CTL1, true, false, 0);
	l1ss_ep = (l1ssctrl_before & PCI_L1SS_CTL1_L1SS_MASK);

	if (enable) {
		if (l1ss_ep == PCI_L1SS_CTL1_L1SS_MASK) {
			/* Already in the requested state: informational, not an error */
			pci_info(dev, "%s: %s already enabled, l1ssctrl: 0x%x\n",
				 __func__, device, l1ssctrl_before);
			return;
		}
		rockchip_pcie_pcie_access_cap(dev, PCI_EXT_CAP_ID_L1SS, PCI_L1SS_CTL1,
					      true, true, (l1ssctrl_before | PCI_L1SS_CTL1_L1SS_MASK));
	} else {
		if (l1ss_ep == 0) {
			/* Already in the requested state: informational, not an error */
			pci_info(dev, "%s: %s already disabled, l1ssctrl: 0x%x\n",
				 __func__, device, l1ssctrl_before);
			return;
		}
		rockchip_pcie_pcie_access_cap(dev, PCI_EXT_CAP_ID_L1SS, PCI_L1SS_CTL1,
					      true, true, (l1ssctrl_before & (~PCI_L1SS_CTL1_L1SS_MASK)));
	}
	l1ssctrl_after = rockchip_pcie_pcie_access_cap(dev, PCI_EXT_CAP_ID_L1SS,
						       PCI_L1SS_CTL1, true, false, 0);
	/* Successful state change is informational, not an error */
	pci_info(dev, "%s: %s %s, l1ssctrl_before: 0x%x l1ssctrl_after: 0x%x\n",
		 __func__, device, (enable ? "ENABLE " : "DISABLE"),
		 l1ssctrl_before, l1ssctrl_after);
}
295*4882a593Smuzhiyun
pcie_aspm_ext_is_rc_ep_l1ss_capable(struct pci_dev * child,struct pci_dev * parent)296*4882a593Smuzhiyun bool pcie_aspm_ext_is_rc_ep_l1ss_capable(struct pci_dev *child, struct pci_dev *parent)
297*4882a593Smuzhiyun {
298*4882a593Smuzhiyun u32 parent_l1ss_cap, child_l1ss_cap;
299*4882a593Smuzhiyun
300*4882a593Smuzhiyun /* Setup L1 substate */
301*4882a593Smuzhiyun pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
302*4882a593Smuzhiyun &parent_l1ss_cap);
303*4882a593Smuzhiyun pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
304*4882a593Smuzhiyun &child_l1ss_cap);
305*4882a593Smuzhiyun
306*4882a593Smuzhiyun if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
307*4882a593Smuzhiyun parent_l1ss_cap = 0;
308*4882a593Smuzhiyun if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
309*4882a593Smuzhiyun child_l1ss_cap = 0;
310*4882a593Smuzhiyun
311*4882a593Smuzhiyun if (parent_l1ss_cap && child_l1ss_cap)
312*4882a593Smuzhiyun return true;
313*4882a593Smuzhiyun else
314*4882a593Smuzhiyun return false;
315*4882a593Smuzhiyun }
316*4882a593Smuzhiyun EXPORT_SYMBOL(pcie_aspm_ext_is_rc_ep_l1ss_capable);
317*4882a593Smuzhiyun
/*
 * pcie_aspm_ext_l1ss_enable() - enable or disable L1 substates on an
 * RC/EP pair, temporarily turning ASPM off while L1SS registers are
 * reconfigured.
 * @child:  endpoint (EP)
 * @parent: root complex / upstream port (RC)
 * @enable: true to enable L1SS (recomputing timing parameters first),
 *          false to disable it
 */
void pcie_aspm_ext_l1ss_enable(struct pci_dev *child, struct pci_dev *parent, bool enable)
{
	bool aspm_was_disabled_here;

	/* ASPM must be off on both ends while L1SS is reprogrammed */
	aspm_was_disabled_here = rockchip_pcie_bus_aspm_enable_rc_ep(child, parent, false);

	if (!enable) {
		/* Tear down: EP first, then RC */
		rockchip_pcie_bus_l1ss_enable_dev("EP", child, enable);
		rockchip_pcie_bus_l1ss_enable_dev("RC", parent, enable);
	} else {
		/* The LTR enable bit is lost across WiFi power-off; restore it */
		if (parent->ltr_path)
			pcie_capability_set_word(parent, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_LTR_EN);

		/* Bring up: recompute L1.2 timing, then RC first, then EP */
		aspm_calc_l1ss_info(child, parent);
		rockchip_pcie_bus_l1ss_enable_dev("RC", parent, enable);
		rockchip_pcie_bus_l1ss_enable_dev("EP", child, enable);
	}

	/* Re-enable ASPM only if the disable above actually changed state */
	if (aspm_was_disabled_here)
		rockchip_pcie_bus_aspm_enable_rc_ep(child, parent, true);
}
EXPORT_SYMBOL(pcie_aspm_ext_l1ss_enable);
345*4882a593Smuzhiyun
346*4882a593Smuzhiyun MODULE_LICENSE("GPL");
347