/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Minimal wrappers to allow compiling igb_uio on older kernels.
 */
5*4882a593Smuzhiyun
6*4882a593Smuzhiyun #ifndef RHEL_RELEASE_VERSION
7*4882a593Smuzhiyun #define RHEL_RELEASE_VERSION(a, b) (((a) << 8) + (b))
8*4882a593Smuzhiyun #endif
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
11*4882a593Smuzhiyun #define pci_cfg_access_lock pci_block_user_cfg_access
12*4882a593Smuzhiyun #define pci_cfg_access_unlock pci_unblock_user_cfg_access
13*4882a593Smuzhiyun #endif
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
16*4882a593Smuzhiyun #define HAVE_PTE_MASK_PAGE_IOMAP
17*4882a593Smuzhiyun #endif
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun #ifndef PCI_MSIX_ENTRY_SIZE
20*4882a593Smuzhiyun #define PCI_MSIX_ENTRY_SIZE 16
21*4882a593Smuzhiyun #define PCI_MSIX_ENTRY_VECTOR_CTRL 12
22*4882a593Smuzhiyun #define PCI_MSIX_ENTRY_CTRL_MASKBIT 1
23*4882a593Smuzhiyun #endif
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun /*
26*4882a593Smuzhiyun * for kernels < 2.6.38 and backported patch that moves MSI-X entry definition
27*4882a593Smuzhiyun * to pci_regs.h Those kernels has PCI_MSIX_ENTRY_SIZE defined but not
28*4882a593Smuzhiyun * PCI_MSIX_ENTRY_CTRL_MASKBIT
29*4882a593Smuzhiyun */
30*4882a593Smuzhiyun #ifndef PCI_MSIX_ENTRY_CTRL_MASKBIT
31*4882a593Smuzhiyun #define PCI_MSIX_ENTRY_CTRL_MASKBIT 1
32*4882a593Smuzhiyun #endif
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34) && \
35*4882a593Smuzhiyun (!(defined(RHEL_RELEASE_CODE) && \
36*4882a593Smuzhiyun RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 9)))
37*4882a593Smuzhiyun
pci_num_vf(struct pci_dev * dev)38*4882a593Smuzhiyun static int pci_num_vf(struct pci_dev *dev)
39*4882a593Smuzhiyun {
40*4882a593Smuzhiyun struct iov {
41*4882a593Smuzhiyun int pos;
42*4882a593Smuzhiyun int nres;
43*4882a593Smuzhiyun u32 cap;
44*4882a593Smuzhiyun u16 ctrl;
45*4882a593Smuzhiyun u16 total;
46*4882a593Smuzhiyun u16 initial;
47*4882a593Smuzhiyun u16 nr_virtfn;
48*4882a593Smuzhiyun } *iov = (struct iov *)dev->sriov;
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun if (!dev->is_physfn)
51*4882a593Smuzhiyun return 0;
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun return iov->nr_virtfn;
54*4882a593Smuzhiyun }
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun #endif /* < 2.6.34 */
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39) && \
59*4882a593Smuzhiyun (!(defined(RHEL_RELEASE_CODE) && \
60*4882a593Smuzhiyun RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 4)))
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun #define kstrtoul strict_strtoul
63*4882a593Smuzhiyun
64*4882a593Smuzhiyun #endif /* < 2.6.39 */
65*4882a593Smuzhiyun
66*4882a593Smuzhiyun #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0) && \
67*4882a593Smuzhiyun (!(defined(RHEL_RELEASE_CODE) && \
68*4882a593Smuzhiyun RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 3)))
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun /* Check if INTX works to control irq's.
71*4882a593Smuzhiyun * Set's INTX_DISABLE flag and reads it back
72*4882a593Smuzhiyun */
pci_intx_mask_supported(struct pci_dev * pdev)73*4882a593Smuzhiyun static bool pci_intx_mask_supported(struct pci_dev *pdev)
74*4882a593Smuzhiyun {
75*4882a593Smuzhiyun bool mask_supported = false;
76*4882a593Smuzhiyun uint16_t orig, new;
77*4882a593Smuzhiyun
78*4882a593Smuzhiyun pci_block_user_cfg_access(pdev);
79*4882a593Smuzhiyun pci_read_config_word(pdev, PCI_COMMAND, &orig);
80*4882a593Smuzhiyun pci_write_config_word(pdev, PCI_COMMAND,
81*4882a593Smuzhiyun orig ^ PCI_COMMAND_INTX_DISABLE);
82*4882a593Smuzhiyun pci_read_config_word(pdev, PCI_COMMAND, &new);
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
85*4882a593Smuzhiyun dev_err(&pdev->dev, "Command register changed from "
86*4882a593Smuzhiyun "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
87*4882a593Smuzhiyun } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
88*4882a593Smuzhiyun mask_supported = true;
89*4882a593Smuzhiyun pci_write_config_word(pdev, PCI_COMMAND, orig);
90*4882a593Smuzhiyun }
91*4882a593Smuzhiyun pci_unblock_user_cfg_access(pdev);
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun return mask_supported;
94*4882a593Smuzhiyun }
95*4882a593Smuzhiyun
pci_check_and_mask_intx(struct pci_dev * pdev)96*4882a593Smuzhiyun static bool pci_check_and_mask_intx(struct pci_dev *pdev)
97*4882a593Smuzhiyun {
98*4882a593Smuzhiyun bool pending;
99*4882a593Smuzhiyun uint32_t status;
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun pci_block_user_cfg_access(pdev);
102*4882a593Smuzhiyun pci_read_config_dword(pdev, PCI_COMMAND, &status);
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun /* interrupt is not ours, goes to out */
105*4882a593Smuzhiyun pending = (((status >> 16) & PCI_STATUS_INTERRUPT) != 0);
106*4882a593Smuzhiyun if (pending) {
107*4882a593Smuzhiyun uint16_t old, new;
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun old = status;
110*4882a593Smuzhiyun if (status != 0)
111*4882a593Smuzhiyun new = old & (~PCI_COMMAND_INTX_DISABLE);
112*4882a593Smuzhiyun else
113*4882a593Smuzhiyun new = old | PCI_COMMAND_INTX_DISABLE;
114*4882a593Smuzhiyun
115*4882a593Smuzhiyun if (old != new)
116*4882a593Smuzhiyun pci_write_config_word(pdev, PCI_COMMAND, new);
117*4882a593Smuzhiyun }
118*4882a593Smuzhiyun pci_unblock_user_cfg_access(pdev);
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun return pending;
121*4882a593Smuzhiyun }
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun #endif /* < 3.3.0 */
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
126*4882a593Smuzhiyun #define HAVE_PCI_IS_BRIDGE_API 1
127*4882a593Smuzhiyun #endif
128*4882a593Smuzhiyun
129*4882a593Smuzhiyun #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
130*4882a593Smuzhiyun #define HAVE_MSI_LIST_IN_GENERIC_DEVICE 1
131*4882a593Smuzhiyun #endif
132*4882a593Smuzhiyun
133*4882a593Smuzhiyun #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
134*4882a593Smuzhiyun #define HAVE_PCI_MSI_MASK_IRQ 1
135*4882a593Smuzhiyun #endif
136*4882a593Smuzhiyun
137*4882a593Smuzhiyun #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
138*4882a593Smuzhiyun #define HAVE_ALLOC_IRQ_VECTORS 1
139*4882a593Smuzhiyun #endif
140*4882a593Smuzhiyun
/*
 * Report whether the running kernel is "locked down" (secure-boot
 * lockdown forbids direct hardware access from user space, which
 * igb_uio requires).
 *
 * Different distro patches expose different config symbols and
 * kernel_is_locked_down() signatures; when no lockdown support is
 * configured this conservatively returns false.
 */
static inline bool igbuio_kernel_is_locked_down(void)
{
#ifdef CONFIG_LOCK_DOWN_KERNEL
#ifdef CONFIG_LOCK_DOWN_IN_EFI_SECURE_BOOT
	return kernel_is_locked_down(NULL);
#elif defined(CONFIG_EFI_SECURE_BOOT_LOCK_DOWN)
	return kernel_is_locked_down();
#else
	return false;
#endif
#else
	return false;
#endif
}
155*4882a593Smuzhiyun
/*
 * Kernels before 5.4 do not define the "fallthrough" pseudo-keyword:
 * map it to the __fallthrough__ statement attribute when the compiler
 * supports it, otherwise to a harmless no-op statement.
 */
#ifndef fallthrough

/* Older compilers lack __has_attribute; treat every attribute as absent. */
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0) /* fallthrough */
#endif

#endif
169