// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"
#include "../include/hw_ip/pci/pci_general.h"

#include <linux/pci.h>

#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC	(HL_PCI_ELBI_TIMEOUT_MSEC * 10)

#define IATU_REGION_CTRL_REGION_EN_MASK		BIT(31)
#define IATU_REGION_CTRL_MATCH_MODE_MASK	BIT(30)
#define IATU_REGION_CTRL_NUM_MATCH_EN_MASK	BIT(19)
#define IATU_REGION_CTRL_BAR_NUM_MASK		GENMASK(10, 8)

/**
 * hl_pci_bars_map() - Map PCI BARs.
 * @hdev: Pointer to hl_device structure.
 * @name: Array of BAR names.
 * @is_wc: Array with flag per BAR whether a write-combined mapping is needed.
 *
 * Request PCI regions and map them to kernel virtual addresses.
 *
 * Return: 0 on success, non-zero for failure.
 */
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
			bool is_wc[3])
{
	struct pci_dev *pdev = hdev->pdev;
	int rc, i, bar;

	rc = pci_request_regions(pdev, HL_NAME);
	if (rc) {
		dev_err(hdev->dev, "Cannot obtain PCI resources\n");
		return rc;
	}

	for (i = 0 ; i < 3 ; i++) {
		bar = i * 2; /* 64-bit BARs */
		hdev->pcie_bar[bar] = is_wc[i] ?
				pci_ioremap_wc_bar(pdev, bar) :
				pci_ioremap_bar(pdev, bar);
		if (!hdev->pcie_bar[bar]) {
			dev_err(hdev->dev, "pci_ioremap%s_bar failed for %s\n",
					is_wc[i] ? "_wc" : "", name[i]);
			rc = -ENODEV;
			goto err;
		}
	}

	return 0;

err:
	for (i = 2 ; i >= 0 ; i--) {
		bar = i * 2; /* 64-bit BARs */
		if (hdev->pcie_bar[bar])
			iounmap(hdev->pcie_bar[bar]);
	}

	pci_release_regions(pdev);

	return rc;
}
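
/*
 * Illustrative usage sketch only (not part of the driver): an ASIC-specific
 * pci_bars_map callback would typically wrap hl_pci_bars_map() as below.
 * The BAR names and write-combine flags here are hypothetical, not taken
 * from any real ASIC.
 */
static int __maybe_unused example_asic_pci_bars_map(struct hl_device *hdev)
{
	static const char * const name[] = {"CFG", "MSIX", "DDR"};
	bool is_wc[3] = {false, false, true};

	/* Only BARs 0/2/4 are mapped; odd BARs hold the 64-bit upper halves */
	return hl_pci_bars_map(hdev, name, is_wc);
}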

/**
 * hl_pci_bars_unmap() - Unmap PCI BARs.
 * @hdev: Pointer to hl_device structure.
 *
 * Release all PCI BARs and unmap their virtual addresses.
 */
static void hl_pci_bars_unmap(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int i, bar;

	for (i = 2 ; i >= 0 ; i--) {
		bar = i * 2; /* 64-bit BARs */
		iounmap(hdev->pcie_bar[bar]);
	}

	pci_release_regions(pdev);
}

/**
 * hl_pci_elbi_write() - Write through the ELBI interface.
 * @hdev: Pointer to hl_device structure.
 * @addr: Address to write to.
 * @data: Data to write.
 *
 * Return: 0 on success, negative value for failure.
 */
static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
{
	struct pci_dev *pdev = hdev->pdev;
	ktime_t timeout;
	u64 msec;
	u32 val;

	if (hdev->pldm)
		msec = HL_PLDM_PCI_ELBI_TIMEOUT_MSEC;
	else
		msec = HL_PCI_ELBI_TIMEOUT_MSEC;

	/* Clear previous status */
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0);

	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
	pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL,
				PCI_CONFIG_ELBI_CTRL_WRITE);

	timeout = ktime_add_ms(ktime_get(), msec);
	for (;;) {
		pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val);
		if (val & PCI_CONFIG_ELBI_STS_MASK)
			break;
		if (ktime_compare(ktime_get(), timeout) > 0) {
			pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS,
						&val);
			break;
		}

		usleep_range(300, 500);
	}

	if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
		return 0;

	if (val & PCI_CONFIG_ELBI_STS_ERR)
		return -EIO;

	if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
		dev_err(hdev->dev, "ELBI write didn't finish in time\n");
		return -EIO;
	}

	dev_err(hdev->dev, "ELBI write has undefined bits in status\n");
	return -EIO;
}
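
/*
 * The polling loop above uses a common kernel idiom: compute an absolute
 * deadline with ktime_add_ms(), poll the status, and re-read it once more
 * after the deadline passes so a completion that lands at the last moment is
 * not reported as a timeout. A minimal generic sketch of the idiom follows;
 * read_status is a hypothetical callback, not part of this driver.
 */
static int __maybe_unused example_poll_with_timeout(u32 (*read_status)(void),
						u32 done_mask, u64 msec)
{
	ktime_t timeout = ktime_add_ms(ktime_get(), msec);
	u32 val;

	for (;;) {
		val = read_status();
		if (val & done_mask)
			return 0;

		if (ktime_compare(ktime_get(), timeout) > 0) {
			/* One final read to avoid reporting a false timeout */
			val = read_status();
			return (val & done_mask) ? 0 : -ETIMEDOUT;
		}

		usleep_range(300, 500);
	}
}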

/**
 * hl_pci_iatu_write() - iATU write routine.
 * @hdev: Pointer to hl_device structure.
 * @addr: Address to write to.
 * @data: Data to write.
 *
 * Return: 0 on success, negative value for failure.
 */
int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u32 dbi_offset;
	int rc;

	dbi_offset = addr & 0xFFF;

	/* Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
	 * in case the firmware security is enabled
	 */
	hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);

	rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
				data);

	if (rc)
		return -EIO;

	return 0;
}
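
/*
 * Illustrative sketch only: enabling inbound iATU region 0 directly through
 * hl_pci_iatu_write(). The offsets follow the DesignWare iATU layout used by
 * hl_pci_set_inbound_region() below (0x200 per region, inbound block at
 * +0x100, control register at +0x4).
 */
static int __maybe_unused example_enable_inbound_region0(struct hl_device *hdev)
{
	u32 offset = (0x200 * 0) + 0x100; /* inbound region 0 register block */

	return hl_pci_iatu_write(hdev, offset + 0x4,
			FIELD_PREP(IATU_REGION_CTRL_REGION_EN_MASK, 1));
}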

/**
 * hl_pci_reset_link_through_bridge() - Reset PCI link.
 * @hdev: Pointer to hl_device structure.
 */
static void hl_pci_reset_link_through_bridge(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct pci_dev *parent_port;
	u16 val;

	parent_port = pdev->bus->self;
	pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
	val |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
	ssleep(1);

	val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
	pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
	ssleep(3);
}
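
/*
 * Note: recent kernels expose pci_bridge_secondary_bus_reset(), which toggles
 * PCI_BRIDGE_CTL_BUS_RESET on the bridge with the required delays, much like
 * the open-coded sequence above. A sketch using that helper (assuming the
 * parent bridge exists and the helper is available in the target kernel):
 */
static int __maybe_unused example_reset_link(struct hl_device *hdev)
{
	struct pci_dev *parent_port = hdev->pdev->bus->self;

	return pci_bridge_secondary_bus_reset(parent_port);
}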

/**
 * hl_pci_set_inbound_region() - Configure inbound region.
 * @hdev: Pointer to hl_device structure.
 * @region: Inbound region number.
 * @pci_region: Inbound region parameters.
 *
 * Configure the iATU inbound region.
 *
 * Return: 0 on success, negative value for failure.
 */
int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
		struct hl_inbound_pci_region *pci_region)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 bar_phys_base, region_base, region_end_address;
	u32 offset, ctrl_reg_val;
	int rc = 0;

	/* region offset */
	offset = (0x200 * region) + 0x100;

	if (pci_region->mode == PCI_ADDRESS_MATCH_MODE) {
		bar_phys_base = hdev->pcie_bar_phys[pci_region->bar];
		region_base = bar_phys_base + pci_region->offset_in_bar;
		region_end_address = region_base + pci_region->size - 1;

		rc |= hl_pci_iatu_write(hdev, offset + 0x8,
				lower_32_bits(region_base));
		rc |= hl_pci_iatu_write(hdev, offset + 0xC,
				upper_32_bits(region_base));
		rc |= hl_pci_iatu_write(hdev, offset + 0x10,
				lower_32_bits(region_end_address));
	}

	/* Point to the specified address */
	rc |= hl_pci_iatu_write(hdev, offset + 0x14,
			lower_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, offset + 0x18,
			upper_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0);

	/* Enable + bar/address match + match enable + bar number */
	ctrl_reg_val = FIELD_PREP(IATU_REGION_CTRL_REGION_EN_MASK, 1);
	ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK,
			pci_region->mode);
	ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_NUM_MATCH_EN_MASK, 1);

	if (pci_region->mode == PCI_BAR_MATCH_MODE)
		ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK,
				pci_region->bar);

	rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);

	/* Return the DBI window to the default location.
	 * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
	 * in case the firmware security is enabled.
	 */
	hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);

	if (rc)
		dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
				pci_region->bar, pci_region->addr);

	return rc;
}
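
/*
 * Illustrative sketch only: configuring inbound region 0 in BAR-match mode so
 * that host accesses through BAR 0 hit a device-internal address. The base
 * address below is hypothetical, not a real ASIC value.
 */
static int __maybe_unused example_set_inbound(struct hl_device *hdev)
{
	struct hl_inbound_pci_region pci_region = {
		.mode = PCI_BAR_MATCH_MODE,
		.bar = 0,
		.addr = 0x7FFC000000ull, /* hypothetical on-die base address */
	};

	return hl_pci_set_inbound_region(hdev, 0, &pci_region);
}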

/**
 * hl_pci_set_outbound_region() - Configure outbound region 0.
 * @hdev: Pointer to hl_device structure.
 * @pci_region: Outbound region parameters.
 *
 * Configure the iATU outbound region 0.
 *
 * Return: 0 on success, negative value for failure.
 */
int hl_pci_set_outbound_region(struct hl_device *hdev,
		struct hl_outbound_pci_region *pci_region)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 outbound_region_end_address;
	int rc = 0;

	/* Outbound Region 0 */
	outbound_region_end_address =
			pci_region->addr + pci_region->size - 1;
	rc |= hl_pci_iatu_write(hdev, 0x008,
			lower_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, 0x00C,
			upper_32_bits(pci_region->addr));
	rc |= hl_pci_iatu_write(hdev, 0x010,
			lower_32_bits(outbound_region_end_address));
	rc |= hl_pci_iatu_write(hdev, 0x014, 0);

	if ((hdev->power9_64bit_dma_enable) && (hdev->dma_mask == 64))
		rc |= hl_pci_iatu_write(hdev, 0x018, 0x08000000);
	else
		rc |= hl_pci_iatu_write(hdev, 0x018, 0);

	rc |= hl_pci_iatu_write(hdev, 0x020,
			upper_32_bits(outbound_region_end_address));
	/* Increase region size */
	rc |= hl_pci_iatu_write(hdev, 0x000, 0x00002000);
	/* Enable */
	rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);

	/* Return the DBI window to the default location.
	 * Ignore result of writing to pcie_aux_dbi_reg_addr as it could fail
	 * in case the firmware security is enabled.
	 */
	hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);

	return rc;
}
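
/*
 * Illustrative sketch only: pointing outbound region 0 at the host physical
 * address window. The base and size below stand in for hypothetical
 * ASIC-specific constants that are not defined in this file.
 */
static int __maybe_unused example_set_outbound(struct hl_device *hdev)
{
	struct hl_outbound_pci_region pci_region = {
		.addr = 0x8000000000ull,	/* hypothetical host window base */
		.size = 0x1000000000ull,	/* hypothetical host window size */
	};

	return hl_pci_set_outbound_region(hdev, &pci_region);
}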

/**
 * hl_pci_set_dma_mask() - Set DMA masks for the device.
 * @hdev: Pointer to hl_device structure.
 *
 * This function sets the DMA masks (regular and consistent) to the value
 * specified by hdev->dma_mask. If either call fails, the error is returned
 * to the caller.
 *
 * Return: 0 on success, non-zero for failure.
 */
static int hl_pci_set_dma_mask(struct hl_device *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	int rc;

	/* set DMA mask */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask));
	if (rc) {
		dev_err(hdev->dev,
			"Failed to set pci dma mask to %d bits, error %d\n",
			hdev->dma_mask, rc);
		return rc;
	}

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(hdev->dma_mask));
	if (rc) {
		dev_err(hdev->dev,
			"Failed to set pci consistent dma mask to %d bits, error %d\n",
			hdev->dma_mask, rc);
		return rc;
	}

	return 0;
}
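
/*
 * Note: pci_set_dma_mask()/pci_set_consistent_dma_mask() above are the older
 * PCI wrappers; on current kernels the same result can be achieved in a
 * single call with dma_set_mask_and_coherent(). A sketch, assuming
 * linux/dma-mapping.h is reachable through the existing includes:
 */
static int __maybe_unused example_set_dma_mask(struct hl_device *hdev)
{
	return dma_set_mask_and_coherent(&hdev->pdev->dev,
					DMA_BIT_MASK(hdev->dma_mask));
}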

/**
 * hl_pci_init() - PCI initialization code.
 * @hdev: Pointer to hl_device structure.
 * @cpu_boot_status_reg: Status register of the device's CPU.
 * @boot_err0_reg: Boot error register of the device's CPU.
 * @preboot_ver_timeout: How long to wait before bailing out on reading
 *                       the preboot version.
 *
 * Set DMA masks, initialize the PCI controller and map the PCI BARs.
 *
 * Return: 0 on success, non-zero for failure.
 */
int hl_pci_init(struct hl_device *hdev, u32 cpu_boot_status_reg,
		u32 boot_err0_reg, u32 preboot_ver_timeout)
{
	struct pci_dev *pdev = hdev->pdev;
	int rc;

	if (hdev->reset_pcilink)
		hl_pci_reset_link_through_bridge(hdev);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		dev_err(hdev->dev, "can't enable PCI device\n");
		return rc;
	}

	pci_set_master(pdev);

	rc = hdev->asic_funcs->pci_bars_map(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize PCI BARs\n");
		goto disable_device;
	}

	rc = hdev->asic_funcs->init_iatu(hdev);
	if (rc) {
		dev_err(hdev->dev, "Failed to initialize iATU\n");
		goto unmap_pci_bars;
	}

	rc = hl_pci_set_dma_mask(hdev);
	if (rc)
		goto unmap_pci_bars;

	/* Before continuing in the initialization, we need to read the preboot
	 * version to determine whether we run with a security-enabled
	 * firmware. The check itself is done in each ASIC's specific code.
	 */
	rc = hl_fw_read_preboot_ver(hdev, cpu_boot_status_reg, boot_err0_reg,
					preboot_ver_timeout);
	if (rc)
		goto unmap_pci_bars;

	return 0;

unmap_pci_bars:
	hl_pci_bars_unmap(hdev);
disable_device:
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	return rc;
}
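
/*
 * Illustrative usage sketch only: an ASIC's early-init path would call
 * hl_pci_init() with its own CPU status/error register offsets and preboot
 * timeout. All three values below are made up for illustration.
 */
static int __maybe_unused example_early_init(struct hl_device *hdev)
{
	/* 0x1000/0x1004 are hypothetical offsets; so is the timeout value */
	return hl_pci_init(hdev, 0x1000, 0x1004, 15000000);
}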

/**
 * hl_pci_fini() - PCI finalization code.
 * @hdev: Pointer to hl_device structure.
 *
 * Unmap PCI BARs and disable PCI device.
 */
void hl_pci_fini(struct hl_device *hdev)
{
	hl_pci_bars_unmap(hdev);

	pci_clear_master(hdev->pdev);
	pci_disable_device(hdev->pdev);
}