// SPDX-License-Identifier: GPL-2.0
/*
 * PCI support in ACPI
 *
 * Copyright (C) 2005 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (C) 2004 Tom Long Nguyen <tom.l.nguyen@intel.com>
 * Copyright (C) 2004 Intel Corp.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/pci_hotplug.h>
#include <linux/module.h>
#include <linux/pci-acpi.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include "pci.h"

/*
 * The GUID is defined in the PCI Firmware Specification available here:
 * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf
 */
const guid_t pci_acpi_dsm_guid =
	GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a,
		  0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d);

#if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64)
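/*
 * Evaluate the _CRS of @adev and copy the first memory resource it
 * describes into @res, so callers can locate root-complex register
 * space for host bridges that are only described via ACPI.
 */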
static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res)
{
	struct device *dev = &adev->dev;
	struct resource_entry *entry;
	struct list_head list;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&list);
	flags = IORESOURCE_MEM;
	ret = acpi_dev_get_resources(adev, &list,
				     acpi_dev_filter_resource_type_cb,
				     (void *) flags);
	if (ret < 0) {
		dev_err(dev, "failed to parse _CRS method, error code %d\n",
			ret);
		return ret;
	}

	if (ret == 0) {
		dev_err(dev, "no IO and memory resources present in _CRS\n");
		return -EINVAL;
	}

	entry = list_first_entry(&list, struct resource_entry, node);
	*res = *entry->res;
	acpi_dev_free_resource_list(&list);
	return 0;
}

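/*
 * acpi_get_devices() callback: match a root-complex device whose _UID
 * equals the PCI segment number passed in @context.
 */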
static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context,
				 void **retval)
{
	u16 *segment = context;
	unsigned long long uid;
	acpi_status status;

	status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
	if (ACPI_FAILURE(status) || uid != *segment)
		return AE_CTRL_DEPTH;

	*(acpi_handle *)retval = handle;
	return AE_CTRL_TERMINATE;
}

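/*
 * acpi_get_rc_resources - find register space for a root complex
 * @dev: device used only for error reporting
 * @hid: _HID of the ACPI device describing the root complex
 * @segment: PCI segment (domain) number, matched against the device's _UID
 * @res: filled with the first memory resource from the device's _CRS
 *
 * Walks the ACPI namespace for a device with @hid whose _UID equals
 * @segment, then extracts its register space from _CRS.
 */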
int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment,
			  struct resource *res)
{
	struct acpi_device *adev;
	acpi_status status;
	acpi_handle handle;
	int ret;

	status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "can't find _HID %s device to locate resources\n",
			hid);
		return -ENODEV;
	}

	ret = acpi_bus_get_device(handle, &adev);
	if (ret)
		return ret;

	ret = acpi_get_rc_addr(adev, res);
	if (ret) {
		dev_err(dev, "can't get resource from %s\n",
			dev_name(&adev->dev));
		return ret;
	}

	return 0;
}
#endif

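/*
 * Evaluate the _CBA method on a host bridge to obtain the base address
 * of its memory-mapped configuration space; return 0 if _CBA is absent
 * or fails.
 */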
phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
{
	acpi_status status = AE_NOT_EXIST;
	unsigned long long mcfg_addr;

	if (handle)
		status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
					       NULL, &mcfg_addr);
	if (ACPI_FAILURE(status))
		return 0;

	return (phys_addr_t)mcfg_addr;
}

/* _HPX PCI Setting Record (Type 0); same as _HPP */
struct hpx_type0 {
	u32 revision;		/* Not present in _HPP */
	u8  cache_line_size;	/* Not applicable to PCIe */
	u8  latency_timer;	/* Not applicable to PCIe */
	u8  enable_serr;
	u8  enable_perr;
};

static struct hpx_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};

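/*
 * Apply a Type 0 setting record (or the defaults above when @hpx is
 * NULL or carries an unsupported revision): program cache line size,
 * latency timer, and SERR/PERR enables, plus the bridge-control
 * equivalents on PCI-to-PCI bridges.
 */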
static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx)
{
	u16 pci_cmd, pci_bctl;

	if (!hpx)
		hpx = &pci_default_type0;

	if (hpx->revision > 1) {
		pci_warn(dev, "PCI settings rev %d not supported; using defaults\n",
			 hpx->revision);
		hpx = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer);
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpx->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpx->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpx->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpx->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}

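/*
 * Decode a Type 0 record from the _HPX package: a 6-element package of
 * type, revision, and the four setting fields copied into @hpx0 below.
 */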
static acpi_status decode_type0_hpx_record(union acpi_object *record,
					   struct hpx_type0 *hpx0)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 6)
			return AE_ERROR;
		for (i = 2; i < 6; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx0->revision = revision;
		hpx0->cache_line_size = fields[2].integer.value;
		hpx0->latency_timer = fields[3].integer.value;
		hpx0->enable_serr = fields[4].integer.value;
		hpx0->enable_perr = fields[5].integer.value;
		break;
	default:
		pr_warn("%s: Type 0 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

/* _HPX PCI-X Setting Record (Type 1) */
struct hpx_type1 {
	u32 revision;
	u8  max_mem_read;
	u8  avg_max_split;
	u16 tot_max_split;
};

static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx)
{
	int pos;

	if (!hpx)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return;

	pci_warn(dev, "PCI-X settings not supported\n");
}

static acpi_status decode_type1_hpx_record(union acpi_object *record,
					   struct hpx_type1 *hpx1)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 5)
			return AE_ERROR;
		for (i = 2; i < 5; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx1->revision = revision;
		hpx1->max_mem_read = fields[2].integer.value;
		hpx1->avg_max_split = fields[3].integer.value;
		hpx1->tot_max_split = fields[4].integer.value;
		break;
	default:
		pr_warn("%s: Type 1 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

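/*
 * Return true if the Root Port above @dev advertises a Read Completion
 * Boundary of 128 bytes in its Link Control register; used below to
 * decide whether RCB may be set on downstream devices.
 */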
static bool pcie_root_rcb_set(struct pci_dev *dev)
{
	struct pci_dev *rp = pcie_find_root_port(dev);
	u16 lnkctl;

	if (!rp)
		return false;

	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
	if (lnkctl & PCI_EXP_LNKCTL_RCB)
		return true;

	return false;
}

/* _HPX PCI Express Setting Record (Type 2) */
struct hpx_type2 {
	u32 revision;
	u32 unc_err_mask_and;
	u32 unc_err_mask_or;
	u32 unc_err_sever_and;
	u32 unc_err_sever_or;
	u32 cor_err_mask_and;
	u32 cor_err_mask_or;
	u32 adv_err_cap_and;
	u32 adv_err_cap_or;
	u16 pci_exp_devctl_and;
	u16 pci_exp_devctl_or;
	u16 pci_exp_lnkctl_and;
	u16 pci_exp_lnkctl_or;
	u32 sec_unc_err_sever_and;
	u32 sec_unc_err_sever_or;
	u32 sec_unc_err_mask_and;
	u32 sec_unc_err_mask_or;
};

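/*
 * Apply a Type 2 (PCIe) setting record.  Each register is updated as
 * (value & *_and) | *_or, so firmware can both clear and set bits;
 * the MPS/MRRS bits are masked out first because the kernel owns those.
 */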
static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx)
{
	int pos;
	u32 reg32;

	if (!hpx)
		return;

	if (!pci_is_pcie(dev))
		return;

	if (hpx->revision > 1) {
		pci_warn(dev, "PCIe settings rev %d not supported\n",
			 hpx->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				   PCI_EXP_DEVCTL_READRQ;
	hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or;

	/* Don't enable ECRC generation or checking if unsupported */
	if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
		reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
	if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
		reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}

static acpi_status decode_type2_hpx_record(union acpi_object *record,
					   struct hpx_type2 *hpx2)
{
	int i;
	union acpi_object *fields = record->package.elements;
	u32 revision = fields[1].integer.value;

	switch (revision) {
	case 1:
		if (record->package.count != 18)
			return AE_ERROR;
		for (i = 2; i < 18; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;
		hpx2->revision = revision;
		hpx2->unc_err_mask_and = fields[2].integer.value;
		hpx2->unc_err_mask_or = fields[3].integer.value;
		hpx2->unc_err_sever_and = fields[4].integer.value;
		hpx2->unc_err_sever_or = fields[5].integer.value;
		hpx2->cor_err_mask_and = fields[6].integer.value;
		hpx2->cor_err_mask_or = fields[7].integer.value;
		hpx2->adv_err_cap_and = fields[8].integer.value;
		hpx2->adv_err_cap_or = fields[9].integer.value;
		hpx2->pci_exp_devctl_and = fields[10].integer.value;
		hpx2->pci_exp_devctl_or = fields[11].integer.value;
		hpx2->pci_exp_lnkctl_and = fields[12].integer.value;
		hpx2->pci_exp_lnkctl_or = fields[13].integer.value;
		hpx2->sec_unc_err_sever_and = fields[14].integer.value;
		hpx2->sec_unc_err_sever_or = fields[15].integer.value;
		hpx2->sec_unc_err_mask_and = fields[16].integer.value;
		hpx2->sec_unc_err_mask_or = fields[17].integer.value;
		break;
	default:
		pr_warn("%s: Type 2 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

/* _HPX PCI Express Setting Record (Type 3) */
struct hpx_type3 {
	u16 device_type;
	u16 function_type;
	u16 config_space_location;
	u16 pci_exp_cap_id;
	u16 pci_exp_cap_ver;
	u16 pci_exp_vendor_id;
	u16 dvsec_id;
	u16 dvsec_rev;
	u16 match_offset;
	u32 match_mask_and;
	u32 match_value;
	u16 reg_offset;
	u32 reg_mask_and;
	u32 reg_mask_or;
};

enum hpx_type3_dev_type {
	HPX_TYPE_ENDPOINT	= BIT(0),
	HPX_TYPE_LEG_END	= BIT(1),
	HPX_TYPE_RC_END		= BIT(2),
	HPX_TYPE_RC_EC		= BIT(3),
	HPX_TYPE_ROOT_PORT	= BIT(4),
	HPX_TYPE_UPSTREAM	= BIT(5),
	HPX_TYPE_DOWNSTREAM	= BIT(6),
	HPX_TYPE_PCI_BRIDGE	= BIT(7),
	HPX_TYPE_PCIE_BRIDGE	= BIT(8),
};

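/*
 * Map the PCIe port type of @dev to the _HPX Type 3 device-type bitmask
 * so it can be tested against a record's device_type field.
 */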
static u16 hpx3_device_type(struct pci_dev *dev)
{
	u16 pcie_type = pci_pcie_type(dev);
	static const int pcie_to_hpx3_type[] = {
		[PCI_EXP_TYPE_ENDPOINT]    = HPX_TYPE_ENDPOINT,
		[PCI_EXP_TYPE_LEG_END]     = HPX_TYPE_LEG_END,
		[PCI_EXP_TYPE_RC_END]      = HPX_TYPE_RC_END,
		[PCI_EXP_TYPE_RC_EC]       = HPX_TYPE_RC_EC,
		[PCI_EXP_TYPE_ROOT_PORT]   = HPX_TYPE_ROOT_PORT,
		[PCI_EXP_TYPE_UPSTREAM]    = HPX_TYPE_UPSTREAM,
		[PCI_EXP_TYPE_DOWNSTREAM]  = HPX_TYPE_DOWNSTREAM,
		[PCI_EXP_TYPE_PCI_BRIDGE]  = HPX_TYPE_PCI_BRIDGE,
		[PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE,
	};

	if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type))
		return 0;

	return pcie_to_hpx3_type[pcie_type];
}

enum hpx_type3_fn_type {
	HPX_FN_NORMAL		= BIT(0),
	HPX_FN_SRIOV_PHYS	= BIT(1),
	HPX_FN_SRIOV_VIRT	= BIT(2),
};

static u8 hpx3_function_type(struct pci_dev *dev)
{
	if (dev->is_virtfn)
		return HPX_FN_SRIOV_VIRT;
	else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0)
		return HPX_FN_SRIOV_PHYS;
	else
		return HPX_FN_NORMAL;
}

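/*
 * The low nibble of @hpx3_cap_id is the capability version to match;
 * BIT(4) means "this version or any greater one is acceptable".
 */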
static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id)
{
	u8 cap_ver = hpx3_cap_id & 0xf;

	if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id)
		return true;
	else if (cap_ver == pcie_cap_id)
		return true;

	return false;
}

enum hpx_type3_cfg_loc {
	HPX_CFG_PCICFG		= 0,
	HPX_CFG_PCIE_CAP	= 1,
	HPX_CFG_PCIE_CAP_EXT	= 2,
	HPX_CFG_VEND_CAP	= 3,
	HPX_CFG_DVSEC		= 4,
	HPX_CFG_MAX,
};

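/*
 * Apply one Type 3 register descriptor to @dev: bail out unless the
 * device and function types match, resolve the config-space base for
 * the descriptor's location, check the match register, then
 * read-modify-write the target register with reg_mask_and/reg_mask_or.
 */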
static void program_hpx_type3_register(struct pci_dev *dev,
				       const struct hpx_type3 *reg)
{
	u32 match_reg, write_reg, header, orig_value;
	u16 pos;

	if (!(hpx3_device_type(dev) & reg->device_type))
		return;

	if (!(hpx3_function_type(dev) & reg->function_type))
		return;

	switch (reg->config_space_location) {
	case HPX_CFG_PCICFG:
		pos = 0;
		break;
	case HPX_CFG_PCIE_CAP:
		pos = pci_find_capability(dev, reg->pci_exp_cap_id);
		if (pos == 0)
			return;

		break;
	case HPX_CFG_PCIE_CAP_EXT:
		pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id);
		if (pos == 0)
			return;

		pci_read_config_dword(dev, pos, &header);
		if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header),
					  reg->pci_exp_cap_ver))
			return;

		break;
	case HPX_CFG_VEND_CAP:
	case HPX_CFG_DVSEC:
	default:
		pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location");
		return;
	}

	pci_read_config_dword(dev, pos + reg->match_offset, &match_reg);

	if ((match_reg & reg->match_mask_and) != reg->match_value)
		return;

	pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg);
	orig_value = write_reg;
	write_reg &= reg->reg_mask_and;
	write_reg |= reg->reg_mask_or;

	if (orig_value == write_reg)
		return;

	pci_write_config_dword(dev, pos + reg->reg_offset, write_reg);

	pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x",
		pos, orig_value, write_reg);
}

static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx)
{
	if (!hpx)
		return;

	if (!pci_is_pcie(dev))
		return;

	program_hpx_type3_register(dev, hpx);
}

static void parse_hpx3_register(struct hpx_type3 *hpx3_reg,
				union acpi_object *reg_fields)
{
	hpx3_reg->device_type = reg_fields[0].integer.value;
	hpx3_reg->function_type = reg_fields[1].integer.value;
	hpx3_reg->config_space_location = reg_fields[2].integer.value;
	hpx3_reg->pci_exp_cap_id = reg_fields[3].integer.value;
	hpx3_reg->pci_exp_cap_ver = reg_fields[4].integer.value;
	hpx3_reg->pci_exp_vendor_id = reg_fields[5].integer.value;
	hpx3_reg->dvsec_id = reg_fields[6].integer.value;
	hpx3_reg->dvsec_rev = reg_fields[7].integer.value;
	hpx3_reg->match_offset = reg_fields[8].integer.value;
	hpx3_reg->match_mask_and = reg_fields[9].integer.value;
	hpx3_reg->match_value = reg_fields[10].integer.value;
	hpx3_reg->reg_offset = reg_fields[11].integer.value;
	hpx3_reg->reg_mask_and = reg_fields[12].integer.value;
	hpx3_reg->reg_mask_or = reg_fields[13].integer.value;
}

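/*
 * A Type 3 record is a header (type, revision, descriptor count)
 * followed by desc_count 14-integer register descriptors; validate the
 * layout, then parse and apply each descriptor in turn.
 */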
static acpi_status program_type3_hpx_record(struct pci_dev *dev,
					    union acpi_object *record)
{
	union acpi_object *fields = record->package.elements;
	u32 desc_count, expected_length, revision;
	union acpi_object *reg_fields;
	struct hpx_type3 hpx3;
	int i;

	revision = fields[1].integer.value;
	switch (revision) {
	case 1:
		desc_count = fields[2].integer.value;
		expected_length = 3 + desc_count * 14;

		if (record->package.count != expected_length)
			return AE_ERROR;

		for (i = 2; i < expected_length; i++)
			if (fields[i].type != ACPI_TYPE_INTEGER)
				return AE_ERROR;

		for (i = 0; i < desc_count; i++) {
			reg_fields = fields + 3 + i * 14;
			parse_hpx3_register(&hpx3, reg_fields);
			program_hpx_type3(dev, &hpx3);
		}

		break;
	default:
		pr_warn("%s: Type 3 Revision %d record not supported\n",
			__func__, revision);
		return AE_ERROR;
	}
	return AE_OK;
}

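/*
 * Evaluate _HPX on @handle and apply every setting record it returns.
 * The object is a package of packages; each inner package starts with
 * an integer record type (0-3) that selects the decoder used below.
 */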
static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle)
{
	acpi_status status;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package, *record, *fields;
	struct hpx_type0 hpx0;
	struct hpx_type1 hpx1;
	struct hpx_type2 hpx2;
	u32 type;
	int i;

	status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *)buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE) {
		status = AE_ERROR;
		goto exit;
	}

	for (i = 0; i < package->package.count; i++) {
		record = &package->package.elements[i];
		if (record->type != ACPI_TYPE_PACKAGE) {
			status = AE_ERROR;
			goto exit;
		}

		fields = record->package.elements;
		if (fields[0].type != ACPI_TYPE_INTEGER ||
		    fields[1].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}

		type = fields[0].integer.value;
		switch (type) {
		case 0:
			memset(&hpx0, 0, sizeof(hpx0));
			status = decode_type0_hpx_record(record, &hpx0);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type0(dev, &hpx0);
			break;
		case 1:
			memset(&hpx1, 0, sizeof(hpx1));
			status = decode_type1_hpx_record(record, &hpx1);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type1(dev, &hpx1);
			break;
		case 2:
			memset(&hpx2, 0, sizeof(hpx2));
			status = decode_type2_hpx_record(record, &hpx2);
			if (ACPI_FAILURE(status))
				goto exit;
			program_hpx_type2(dev, &hpx2);
			break;
		case 3:
			status = program_type3_hpx_record(dev, record);
			if (ACPI_FAILURE(status))
				goto exit;
			break;
		default:
			pr_err("%s: Type %d record not supported\n",
			       __func__, type);
			status = AE_ERROR;
			goto exit;
		}
	}
exit:
	kfree(buffer.pointer);
	return status;
}

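/*
 * Evaluate the legacy _HPP method, which returns a flat four-integer
 * package, and apply it as a Type 0 record with an implied revision
 * of 1.
 */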
static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle)
{
	acpi_status status;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *package, *fields;
	struct hpx_type0 hpx0;
	int i;

	memset(&hpx0, 0, sizeof(hpx0));

	status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
	if (ACPI_FAILURE(status))
		return status;

	package = (union acpi_object *) buffer.pointer;
	if (package->type != ACPI_TYPE_PACKAGE ||
	    package->package.count != 4) {
		status = AE_ERROR;
		goto exit;
	}

	fields = package->package.elements;
	for (i = 0; i < 4; i++) {
		if (fields[i].type != ACPI_TYPE_INTEGER) {
			status = AE_ERROR;
			goto exit;
		}
	}

	hpx0.revision = 1;
	hpx0.cache_line_size = fields[0].integer.value;
	hpx0.latency_timer = fields[1].integer.value;
	hpx0.enable_serr = fields[2].integer.value;
	hpx0.enable_perr = fields[3].integer.value;

	program_hpx_type0(dev, &hpx0);

exit:
	kfree(buffer.pointer);
	return status;
}

/**
 * pci_acpi_program_hp_params - program hotplug parameters from _HPX/_HPP
 * @dev: the pci_dev for which we want parameters
 */
int pci_acpi_program_hp_params(struct pci_dev *dev)
{
	acpi_status status;
	acpi_handle handle, phandle;
	struct pci_bus *pbus;

	if (acpi_pci_disabled)
		return -ENODEV;

	handle = NULL;
	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
		handle = acpi_pci_get_bridge_handle(pbus);
		if (handle)
			break;
	}

	/*
	 * _HPP settings apply to all child buses, until another _HPP is
	 * encountered.  If we don't find an _HPP for the input pci dev,
	 * look for it in the parent device scope since that would apply to
	 * this pci dev.
	 */
	while (handle) {
		status = acpi_run_hpx(dev, handle);
		if (ACPI_SUCCESS(status))
			return 0;
		status = acpi_run_hpp(dev, handle);
		if (ACPI_SUCCESS(status))
			return 0;
		if (acpi_is_root_bridge(handle))
			break;
		status = acpi_get_parent(handle, &phandle);
		if (ACPI_FAILURE(status))
			break;
		handle = phandle;
	}
	return -ENODEV;
}

/**
 * pciehp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native PCIe hotplug
 * driver.
 */
bool pciehp_is_native(struct pci_dev *bridge)
{
	const struct pci_host_bridge *host;
	u32 slot_cap;

	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
		return false;

	pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap);
	if (!(slot_cap & PCI_EXP_SLTCAP_HPC))
		return false;

	if (pcie_ports_native)
		return true;

	host = pci_find_host_bridge(bridge->bus);
	return host->native_pcie_hotplug;
}

/**
 * shpchp_is_native - Check whether a hotplug port is handled by the OS
 * @bridge: Hotplug port to check
 *
 * Returns true if the given @bridge is handled by the native SHPC hotplug
 * driver.
 */
bool shpchp_is_native(struct pci_dev *bridge)
{
	return bridge->shpc_managed;
}

/**
 * pci_acpi_wake_bus - Root bus wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context)
{
	struct acpi_device *adev;
	struct acpi_pci_root *root;

	adev = container_of(context, struct acpi_device, wakeup.context);
	root = acpi_driver_data(adev);
	pci_pme_wakeup_bus(root->bus);
}

/**
 * pci_acpi_wake_dev - PCI device wakeup notification work function.
 * @context: Device wakeup context.
 */
static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(context->dev);

	if (pci_dev->pme_poll)
		pci_dev->pme_poll = false;

	if (pci_dev->current_state == PCI_D3cold) {
		pci_wakeup_event(pci_dev);
		pm_request_resume(&pci_dev->dev);
		return;
	}

	/* Clear PME Status if set. */
	if (pci_dev->pme_support)
		pci_check_pme_status(pci_dev);

	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);

	pci_pme_wakeup_bus(pci_dev->subordinate);
}

/**
 * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus.
 * @dev: PCI root bridge ACPI device.
 */
acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev)
{
	return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus);
}

/**
 * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device.
 * @dev: ACPI device to add the notifier for.
 * @pci_dev: PCI device to check for the PME status if an event is signaled.
 */
acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
				     struct pci_dev *pci_dev)
{
	return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
}

/*
 * _SxD returns the D-state with the highest power
 * (lowest D-state number) supported in the S-state "x".
 *
 * If the device does not have a _PRW
 * (Power Resources for Wake) supporting system wakeup from "x"
 * then the OS is free to choose a lower power (higher number
 * D-state) than the return value from _SxD.
 *
 * But if _PRW is enabled at S-state "x", the OS
 * must not choose a power lower than _SxD --
 * unless the device has an _SxW method specifying
 * the lowest power (highest D-state number) the device
 * may enter while still able to wake the system.
 *
 * i.e. depending on global OS policy:
 *
 * if (_PRW at S-state x)
 *	choose from highest power _SxD to lowest power _SxW
 * else // no _PRW at S-state x
 *	choose highest power _SxD or any lower power
 */

static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
	int acpi_state, d_max;

	if (pdev->no_d3cold)
		d_max = ACPI_STATE_D3_HOT;
	else
		d_max = ACPI_STATE_D3_COLD;
	acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
	if (acpi_state < 0)
		return PCI_POWER_ERROR;

	switch (acpi_state) {
	case ACPI_STATE_D0:
		return PCI_D0;
	case ACPI_STATE_D1:
		return PCI_D1;
	case ACPI_STATE_D2:
		return PCI_D2;
	case ACPI_STATE_D3_HOT:
		return PCI_D3hot;
	case ACPI_STATE_D3_COLD:
		return PCI_D3cold;
	}
	return PCI_POWER_ERROR;
}

static struct acpi_device *acpi_pci_find_companion(struct device *dev);

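/*
 * Decide whether a hotplug bridge may be put into D3: true if the
 * bridge is power-manageable by ACPI, or if its Root Port carries the
 * "HotPlugSupportInD3" _DSD property set to 1.
 */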
static bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
	const struct fwnode_handle *fwnode;
	struct acpi_device *adev;
	struct pci_dev *root;
	u8 val;

	if (!dev->is_hotplug_bridge)
		return false;

	/* Assume D3 support if the bridge is power-manageable by ACPI. */
	adev = ACPI_COMPANION(&dev->dev);
	if (!adev && !pci_dev_is_added(dev)) {
		adev = acpi_pci_find_companion(&dev->dev);
		ACPI_COMPANION_SET(&dev->dev, adev);
	}

	if (adev && acpi_device_power_manageable(adev))
		return true;

	/*
	 * Look for a special _DSD property for the root port and if it
	 * is set we know the hierarchy behind it supports D3 just fine.
	 */
	root = pcie_find_root_port(dev);
	if (!root)
		return false;

	adev = ACPI_COMPANION(&root->dev);
	if (root == dev) {
		/*
		 * It is possible that the ACPI companion is not yet bound
		 * for the root port so look it up manually here.
		 */
		if (!adev && !pci_dev_is_added(root))
			adev = acpi_pci_find_companion(&root->dev);
	}

	if (!adev)
		return false;

	fwnode = acpi_fwnode_handle(adev);
	if (fwnode_property_read_u8(fwnode, "HotPlugSupportInD3", &val))
		return false;

	return val == 1;
}

static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	return adev ? acpi_device_power_manageable(adev) : false;
}

static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	static const u8 state_conv[] = {
		[PCI_D0]     = ACPI_STATE_D0,
		[PCI_D1]     = ACPI_STATE_D1,
		[PCI_D2]     = ACPI_STATE_D2,
		[PCI_D3hot]  = ACPI_STATE_D3_HOT,
		[PCI_D3cold] = ACPI_STATE_D3_COLD,
	};
	int error = -EINVAL;

	/* If the ACPI device has _EJ0, ignore the device */
	if (!adev || acpi_has_method(adev->handle, "_EJ0"))
		return -ENODEV;

	switch (state) {
	case PCI_D3cold:
		if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) ==
				PM_QOS_FLAGS_ALL) {
			error = -EBUSY;
			break;
		}
		fallthrough;
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
	case PCI_D3hot:
		error = acpi_device_set_power(adev, state_conv[state]);
	}

	if (!error)
		pci_dbg(dev, "power state changed by ACPI to %s\n",
			acpi_power_state_string(state_conv[state]));

	return error;
}

static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
	static const pci_power_t state_conv[] = {
		[ACPI_STATE_D0]      = PCI_D0,
		[ACPI_STATE_D1]      = PCI_D1,
		[ACPI_STATE_D2]      = PCI_D2,
		[ACPI_STATE_D3_HOT]  = PCI_D3hot,
		[ACPI_STATE_D3_COLD] = PCI_D3cold,
	};
	int state;

	if (!adev || !acpi_device_power_manageable(adev))
		return PCI_UNKNOWN;

	state = adev->power.state;
	if (state == ACPI_STATE_UNKNOWN)
		return PCI_UNKNOWN;

	return state_conv[state];
}

static void acpi_pci_refresh_power_state(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	if (adev && acpi_device_power_manageable(adev))
		acpi_device_update_power(adev, NULL);
}

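/*
 * Walk up from @bus to the first bridge that is wakeup-capable in ACPI
 * terms and configure wakeup there; fall back to the root bridge.
 */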
static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
{
	while (bus->parent) {
		if (acpi_pm_device_can_wakeup(&bus->self->dev))
			return acpi_pm_set_device_wakeup(&bus->self->dev, enable);

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge) {
		if (acpi_pm_device_can_wakeup(bus->bridge))
			return acpi_pm_set_device_wakeup(bus->bridge, enable);
	}
	return 0;
}

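/*
 * Configure wakeup for @dev itself if it is wakeup-capable; otherwise
 * propagate the request to a bridge above it.
 */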
static int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
{
	if (acpi_pm_device_can_wakeup(&dev->dev))
		return acpi_pm_set_device_wakeup(&dev->dev, enable);

	return acpi_pci_propagate_wakeup(dev->bus, enable);
}

static bool acpi_pci_need_resume(struct pci_dev *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);

	/*
	 * In some cases (e.g. Samsung 305V4A) leaving a bridge in suspend
	 * over system-wide suspend/resume confuses the platform firmware,
	 * so avoid doing that.  According to Section 16.1.6 of ACPI 6.2,
	 * endpoint devices are expected to be in D3 before invoking the S3
	 * entry path from the firmware, so they should not be affected by
	 * this issue.
	 */
	if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
		return true;

	if (!adev || !acpi_device_power_manageable(adev))
		return false;

	if (adev->wakeup.flags.valid &&
	    device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
		return true;

	if (acpi_target_system_state() == ACPI_STATE_S0)
		return false;

	return !!adev->power.flags.dsw_present;
}

static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
	.bridge_d3	= acpi_pci_bridge_d3,
	.is_manageable	= acpi_pci_power_manageable,
	.set_state	= acpi_pci_set_power_state,
	.get_state	= acpi_pci_get_power_state,
	.refresh_state	= acpi_pci_refresh_power_state,
	.choose_state	= acpi_pci_choose_state,
	.set_wakeup	= acpi_pci_wakeup,
	.need_resume	= acpi_pci_need_resume,
};

void acpi_pci_add_bus(struct pci_bus *bus)
{
	union acpi_object *obj;
	struct pci_host_bridge *bridge;

	if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
		return;

	acpi_pci_slot_enumerate(bus);
	acpiphp_enumerate_slots(bus);

	/*
	 * For a host bridge, check its _DSM for function 8 and if
	 * that is available, mark it in pci_host_bridge.
	 */
	if (!pci_is_root_bus(bus))
		return;

	obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3,
				DSM_PCI_POWER_ON_RESET_DELAY, NULL);
	if (!obj)
		return;

	if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) {
		bridge = pci_find_host_bridge(bus);
		bridge->ignore_reset_delay = 1;
	}
	ACPI_FREE(obj);
}

void acpi_pci_remove_bus(struct pci_bus *bus)
{
	if (acpi_pci_disabled || !bus->bridge)
		return;

	acpiphp_remove_slots(bus);
	acpi_pci_slot_remove(bus);
}

/* ACPI bus type */
static struct acpi_device *acpi_pci_find_companion(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	bool check_children;
	u64 addr;

	check_children = pci_is_bridge(pci_dev);
	/* Please refer to the ACPI spec for the syntax of _ADR. */
	addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
	return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr,
				      check_children);
}


/**
 * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI
 * @pdev: the PCI device whose delay is to be updated
 * @handle: ACPI handle of this device
 *
 * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM
 * control method of either the device itself or the PCI host bridge.
 *
 * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
 * host bridge. If it returns one, the OS may assume that all devices in
 * the hierarchy have already completed power-on reset delays.
 *
 * Function 9, "Device Readiness Durations," applies only to the object
 * where it is located. It returns delay durations required after various
 * events if the device requires less time than the spec requires. Delays
 * from this function take precedence over the Reset Delay function.
 *
 * These _DSM functions are defined by the draft ECN of January 28, 2014,
 * titled "ACPI additions for FW latency optimizations."
 */
static void pci_acpi_optimize_delay(struct pci_dev *pdev,
				    acpi_handle handle)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
	int value;
	union acpi_object *obj, *elements;

	if (bridge->ignore_reset_delay)
		pdev->d3cold_delay = 0;

	obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3,
				DSM_PCI_DEVICE_READINESS_DURATIONS, NULL);
	if (!obj)
		return;

	if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) {
		elements = obj->package.elements;
		if (elements[0].type == ACPI_TYPE_INTEGER) {
			value = (int)elements[0].integer.value / 1000;
			if (value < PCI_PM_D3COLD_WAIT)
				pdev->d3cold_delay = value;
		}
		if (elements[3].type == ACPI_TYPE_INTEGER) {
			value = (int)elements[3].integer.value / 1000;
			if (value < PCI_PM_D3HOT_WAIT)
				pdev->d3hot_delay = value;
		}
	}
	ACPI_FREE(obj);
}
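
/*
 * A minimal sketch of the Function 9 package the parser above expects:
 * five Integer durations in microseconds, of which only elements 0
 * (used for d3cold_delay) and 3 (used for d3hot_delay) are consumed;
 * the values shown are made up for illustration:
 *
 *	Package () {
 *		100000,		// [0] -> d3cold_delay: 100000 us / 1000 = 100 ms
 *		0,		// [1] ignored here
 *		0,		// [2] ignored here
 *		5000,		// [3] -> d3hot_delay: 5000 us / 1000 = 5 ms
 *		0,		// [4] ignored here
 *	}
 *
 * Each value is applied only if it is shorter than the corresponding
 * PCI_PM_D3*_WAIT default.
 */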

static void pci_acpi_set_external_facing(struct pci_dev *dev)
{
	u8 val;

	if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		return;
	if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val))
		return;

	/*
	 * These root ports expose PCIe (including DMA) outside of the
	 * system. Everything downstream from them is external.
	 */
	if (val)
		dev->external_facing = 1;
}
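
/*
 * "ExternalFacingPort" normally reaches device_property_read_u8() as an
 * ACPI _DSD device property on the root port; a hypothetical ASL sketch:
 *
 *	Name (_DSD, Package () {
 *		ToUUID ("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
 *		Package () { Package () { "ExternalFacingPort", 1 } }
 *	})
 */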

static void pci_acpi_setup(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct acpi_device *adev = ACPI_COMPANION(dev);

	if (!adev)
		return;

	pci_acpi_optimize_delay(pci_dev, adev->handle);
	pci_acpi_set_external_facing(pci_dev);
	pci_acpi_add_edr_notifier(pci_dev);

	pci_acpi_add_pm_notifier(adev, pci_dev);
	if (!adev->wakeup.flags.valid)
		return;

	device_set_wakeup_capable(dev, true);
	/*
	 * For bridges that can do D3 we enable wake automatically (as
	 * we do for the power management itself in that case). The
	 * reason is that the bridge may have additional methods such as
	 * _DSW that need to be called.
	 */
	if (pci_dev->bridge_d3)
		device_wakeup_enable(dev);

	acpi_pci_wakeup(pci_dev, false);
	acpi_device_power_add_dependent(adev, dev);
}

static void pci_acpi_cleanup(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (!adev)
		return;

	pci_acpi_remove_edr_notifier(pci_dev);
	pci_acpi_remove_pm_notifier(adev);
	if (adev->wakeup.flags.valid) {
		acpi_device_power_remove_dependent(adev, dev);
		if (pci_dev->bridge_d3)
			device_wakeup_disable(dev);

		device_set_wakeup_capable(dev, false);
	}
}

static bool pci_acpi_bus_match(struct device *dev)
{
	return dev_is_pci(dev);
}

static struct acpi_bus_type acpi_pci_bus = {
	.name = "PCI",
	.match = pci_acpi_bus_match,
	.find_companion = acpi_pci_find_companion,
	.setup = pci_acpi_setup,
	.cleanup = pci_acpi_cleanup,
};
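
/*
 * Sketch of the expected flow (assuming the mainline ACPI glue layer,
 * drivers/acpi/glue.c): when a device matching this bus type is added,
 * the glue code uses .find_companion() to bind an acpi_device and then
 * calls .setup(); on removal it calls .cleanup(). The bus type itself
 * is registered in acpi_pci_init() below.
 */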

static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);

/**
 * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode
 * @fn: Callback matching a device to a fwnode that identifies a PCI
 *      MSI domain.
 *
 * The irqchip driver that is the parent of the MSI domain should call
 * this to register the callback used to look up the fwnode.
 */
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *))
{
	pci_msi_get_fwnode_cb = fn;
}

/**
 * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge
 * @bus: The PCI host bridge bus.
 *
 * This function uses the callback function registered by
 * pci_msi_register_fwnode_provider() to retrieve the irq_domain with
 * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus.
 * This returns NULL on error or when the domain is not found.
 */
struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
{
	struct fwnode_handle *fwnode;

	if (!pci_msi_get_fwnode_cb)
		return NULL;

	fwnode = pci_msi_get_fwnode_cb(&bus->dev);
	if (!fwnode)
		return NULL;

	return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI);
}
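
/*
 * A minimal provider-side sketch with hypothetical names; per the
 * kernel-doc above, the parent irqchip driver registers the callback:
 *
 *	static struct fwnode_handle *my_pci_msi_fwnode(struct device *dev)
 *	{
 *		return my_msi_fwnode;	// fwnode of the MSI controller
 *	}
 *	...
 *	pci_msi_register_fwnode_provider(my_pci_msi_fwnode);
 */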

static int __init acpi_pci_init(void)
{
	int ret;

	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
		pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
		pci_no_msi();
	}

	if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
		pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
		pcie_no_aspm();
	}

	ret = register_acpi_bus_type(&acpi_pci_bus);
	if (ret)
		return 0;

	pci_set_platform_pm(&acpi_pci_platform_pm);
	acpi_pci_slot_init();
	acpiphp_init();

	return 0;
}
arch_initcall(acpi_pci_init);
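
/*
 * arch_initcall runs at initcall level 3, ahead of subsys_initcall (4)
 * and device_initcall (6), so this backend and the slot/hotplug hooks
 * are in place before ACPI host bridges and PCI devices are enumerated.
 */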