1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun
3*4882a593Smuzhiyun /*
4*4882a593Smuzhiyun * Copyright 2016-2019 HabanaLabs, Ltd.
5*4882a593Smuzhiyun * All Rights Reserved.
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include "habanalabs.h"
9*4882a593Smuzhiyun #include "../include/common/hl_boot_if.h"
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #include <linux/firmware.h>
12*4882a593Smuzhiyun #include <linux/genalloc.h>
13*4882a593Smuzhiyun #include <linux/io-64-nonatomic-lo-hi.h>
14*4882a593Smuzhiyun #include <linux/slab.h>
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun #define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
17*4882a593Smuzhiyun /**
18*4882a593Smuzhiyun * hl_fw_load_fw_to_device() - Load F/W code to device's memory.
19*4882a593Smuzhiyun *
20*4882a593Smuzhiyun * @hdev: pointer to hl_device structure.
21*4882a593Smuzhiyun * @fw_name: the firmware image name
22*4882a593Smuzhiyun * @dst: IO memory mapped address space to copy firmware to
23*4882a593Smuzhiyun *
24*4882a593Smuzhiyun * Copy fw code from firmware file to device memory.
25*4882a593Smuzhiyun *
26*4882a593Smuzhiyun * Return: 0 on success, non-zero for failure.
27*4882a593Smuzhiyun */
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
				void __iomem *dst)
{
	const struct firmware *fw;
	size_t fw_size;
	int rc;

	rc = request_firmware(&fw, fw_name, hdev->dev);
	if (rc) {
		dev_err(hdev->dev, "Firmware file %s is not found!\n", fw_name);
		/* request_firmware() leaves fw NULL on failure */
		return rc;
	}

	fw_size = fw->size;

	/* The image is copied as 64-bit words, so it must be 4-byte aligned */
	if ((fw_size % 4) != 0) {
		dev_err(hdev->dev, "Illegal %s firmware size %zu\n",
			fw_name, fw_size);
		rc = -EINVAL;
		goto release_fw;
	}

	dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size);

	if (fw_size > FW_FILE_MAX_SIZE) {
		dev_err(hdev->dev,
			"FW file size %zu exceeds maximum of %u bytes\n",
			fw_size, FW_FILE_MAX_SIZE);
		rc = -EINVAL;
		goto release_fw;
	}

	/* Copy the image into the device's IO-mapped destination */
	memcpy_toio(dst, (const u64 *) fw->data, fw_size);

release_fw:
	release_firmware(fw);
	return rc;
}
68*4882a593Smuzhiyun
/* Notify the device CPU about a host PCI access state change (opcode). */
int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
{
	struct cpucp_packet pkt = {
		.ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT),
	};

	/* Fire-and-forget: no timeout override, no result expected */
	return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
						sizeof(pkt), 0, NULL);
}
78*4882a593Smuzhiyun
/*
 * hl_fw_send_cpu_message() - Send a packet to the device CPU and wait for
 * its completion.
 *
 * @hdev: pointer to hl_device structure.
 * @hw_queue_id: H/W queue to submit the packet on.
 * @msg: packet payload to copy into CPU-accessible DMA memory.
 * @len: payload length in bytes.
 * @timeout: completion timeout in usec (0 presumably selects a default
 *           inside hl_poll_timeout_memory() — confirm with its definition).
 * @result: optional out parameter for the packet's 64-bit result field.
 *
 * Return: 0 on success, -ENOMEM/-EIO or a poll error code on failure.
 * NOTE(review): if the device is disabled, the function returns 0 without
 * writing *result — callers must not rely on *result in that case.
 */
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
				u16 len, u32 timeout, long *result)
{
	struct cpucp_packet *pkt;
	dma_addr_t pkt_dma_addr;
	u32 tmp;
	int rc = 0;

	/* The packet must live in CPU-accessible DMA memory so the device CPU
	 * can read it and write the fence/result fields back.
	 */
	pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
								&pkt_dma_addr);
	if (!pkt) {
		dev_err(hdev->dev,
			"Failed to allocate DMA memory for packet to CPU\n");
		return -ENOMEM;
	}

	memcpy(pkt, msg, len);

	/* Serialize all host->CPU messages; only one packet is in flight */
	mutex_lock(&hdev->send_cpu_message_lock);

	/* Device going down: silently succeed (rc stays 0) */
	if (hdev->disabled)
		goto out;

	/* A previous timeout marked the device CPU as unusable */
	if (hdev->device_cpu_disabled) {
		rc = -EIO;
		goto out;
	}

	rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, len, pkt_dma_addr);
	if (rc) {
		dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc);
		goto out;
	}

	/* Busy-wait for the device CPU to write the fence value into the
	 * packet, polling every 1000 usec.
	 */
	rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
				(tmp == CPUCP_PACKET_FENCE_VAL), 1000,
				timeout, true);

	/* The CPU consumed the packet, advance the kernel consumer index */
	hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);

	if (rc == -ETIMEDOUT) {
		dev_err(hdev->dev, "Device CPU packet timeout (0x%x)\n", tmp);
		/* Stop talking to a CPU that no longer answers */
		hdev->device_cpu_disabled = true;
		goto out;
	}

	tmp = le32_to_cpu(pkt->ctl);

	/* The device CPU reports its own error code in the ctl RC field */
	rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
	if (rc) {
		dev_err(hdev->dev, "F/W ERROR %d for CPU packet %d\n",
			rc,
			(tmp & CPUCP_PKT_CTL_OPCODE_MASK)
					>> CPUCP_PKT_CTL_OPCODE_SHIFT);
		rc = -EIO;
	} else if (result) {
		*result = (long) le64_to_cpu(pkt->result);
	}

out:
	mutex_unlock(&hdev->send_cpu_message_lock);

	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);

	return rc;
}
145*4882a593Smuzhiyun
/*
 * hl_fw_unmask_irq() - Ask the device CPU to unmask a single RAZWI IRQ event.
 *
 * @hdev: pointer to hl_device structure.
 * @event_type: event ID to unmask.
 *
 * Return: 0 on success, non-zero on failure.
 */
int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
{
	/* Zero-initialize via {} for consistency with the rest of this file */
	struct cpucp_packet pkt = {};
	long result;
	int rc;

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.value = cpu_to_le64(event_type);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, &result);

	if (rc)
		dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);

	return rc;
}
166*4882a593Smuzhiyun
/*
 * hl_fw_unmask_irq_arr() - Unmask an array of RAZWI IRQ events in one packet.
 *
 * @hdev: pointer to hl_device structure.
 * @irq_arr: array of event IDs.
 * @irq_arr_size: size of the array in bytes.
 *
 * Return: 0 on success, non-zero on failure.
 */
int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
			size_t irq_arr_size)
{
	struct cpucp_unmask_irq_arr_packet *pkt;
	size_t size;
	long result;
	int rc;

	/* Header plus the trailing IRQ array... */
	size = sizeof(struct cpucp_unmask_irq_arr_packet) + irq_arr_size;

	/* data should be aligned to 8 bytes in order to CPU-CP to copy it */
	size = (size + 0x7) & ~0x7;

	/* total_pkt_size is casted to u16 later on */
	if (size > USHRT_MAX) {
		dev_err(hdev->dev, "too many elements in IRQ array\n");
		return -EINVAL;
	}

	pkt = kzalloc(size, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	pkt->cpucp_pkt.ctl = cpu_to_le32(CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
					CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt->length = cpu_to_le32(irq_arr_size / sizeof(irq_arr[0]));
	memcpy(&pkt->irqs, irq_arr, irq_arr_size);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
						size, 0, &result);

	if (rc)
		dev_err(hdev->dev, "failed to unmask IRQ array\n");

	kfree(pkt);

	return rc;
}
207*4882a593Smuzhiyun
hl_fw_test_cpu_queue(struct hl_device * hdev)208*4882a593Smuzhiyun int hl_fw_test_cpu_queue(struct hl_device *hdev)
209*4882a593Smuzhiyun {
210*4882a593Smuzhiyun struct cpucp_packet test_pkt = {};
211*4882a593Smuzhiyun long result;
212*4882a593Smuzhiyun int rc;
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun test_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
215*4882a593Smuzhiyun CPUCP_PKT_CTL_OPCODE_SHIFT);
216*4882a593Smuzhiyun test_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
217*4882a593Smuzhiyun
218*4882a593Smuzhiyun rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
219*4882a593Smuzhiyun sizeof(test_pkt), 0, &result);
220*4882a593Smuzhiyun
221*4882a593Smuzhiyun if (!rc) {
222*4882a593Smuzhiyun if (result != CPUCP_PACKET_FENCE_VAL)
223*4882a593Smuzhiyun dev_err(hdev->dev,
224*4882a593Smuzhiyun "CPU queue test failed (0x%08lX)\n", result);
225*4882a593Smuzhiyun } else {
226*4882a593Smuzhiyun dev_err(hdev->dev, "CPU queue test failed, error %d\n", rc);
227*4882a593Smuzhiyun }
228*4882a593Smuzhiyun
229*4882a593Smuzhiyun return rc;
230*4882a593Smuzhiyun }
231*4882a593Smuzhiyun
/*
 * hl_fw_cpu_accessible_dma_pool_alloc() - Allocate from the CPU-accessible
 * DMA pool and compute the matching DMA handle.
 *
 * @hdev: pointer to hl_device structure.
 * @size: allocation size in bytes.
 * @dma_handle: out parameter for the DMA address of the allocation.
 *
 * Return: kernel virtual address of the allocation, or NULL on failure.
 */
void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
					dma_addr_t *dma_handle)
{
	u64 kernel_addr;

	kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size);

	/* gen_pool_alloc() returns 0 on failure; bail out early instead of
	 * deriving a bogus DMA handle from address 0.
	 */
	if (!kernel_addr)
		return NULL;

	/* DMA handle is the pool's DMA base plus the offset of the
	 * allocation within the pool's virtual mapping.
	 */
	*dma_handle = hdev->cpu_accessible_dma_address +
		(kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem);

	return (void *) (uintptr_t) kernel_addr;
}
244*4882a593Smuzhiyun
/* Return a CPU-accessible DMA pool allocation obtained from the matching
 * alloc routine back to the pool.
 */
void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
					void *vaddr)
{
	u64 kernel_addr = (u64) (uintptr_t) vaddr;

	gen_pool_free(hdev->cpu_accessible_dma_pool, kernel_addr, size);
}
251*4882a593Smuzhiyun
hl_fw_send_heartbeat(struct hl_device * hdev)252*4882a593Smuzhiyun int hl_fw_send_heartbeat(struct hl_device *hdev)
253*4882a593Smuzhiyun {
254*4882a593Smuzhiyun struct cpucp_packet hb_pkt = {};
255*4882a593Smuzhiyun long result;
256*4882a593Smuzhiyun int rc;
257*4882a593Smuzhiyun
258*4882a593Smuzhiyun hb_pkt.ctl = cpu_to_le32(CPUCP_PACKET_TEST <<
259*4882a593Smuzhiyun CPUCP_PKT_CTL_OPCODE_SHIFT);
260*4882a593Smuzhiyun hb_pkt.value = cpu_to_le64(CPUCP_PACKET_FENCE_VAL);
261*4882a593Smuzhiyun
262*4882a593Smuzhiyun rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
263*4882a593Smuzhiyun sizeof(hb_pkt), 0, &result);
264*4882a593Smuzhiyun
265*4882a593Smuzhiyun if ((rc) || (result != CPUCP_PACKET_FENCE_VAL))
266*4882a593Smuzhiyun rc = -EIO;
267*4882a593Smuzhiyun
268*4882a593Smuzhiyun return rc;
269*4882a593Smuzhiyun }
270*4882a593Smuzhiyun
/*
 * hl_fw_cpucp_info_get() - Fetch the CPU-CP info structure from the device
 * and cache it in the ASIC fixed properties.
 *
 * Also builds the hwmon channel info from the sensors the firmware reports.
 *
 * Return: 0 on success, -ENOMEM/-EFAULT or a send error code on failure.
 */
int hl_fw_cpucp_info_get(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct cpucp_packet pkt = {};
	void *cpucp_info_cpu_addr;
	dma_addr_t cpucp_info_dma_addr;
	long result;
	int rc;

	/* The firmware writes the info struct into CPU-accessible DMA memory */
	cpucp_info_cpu_addr =
			hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
					sizeof(struct cpucp_info),
					&cpucp_info_dma_addr);
	if (!cpucp_info_cpu_addr) {
		dev_err(hdev->dev,
			"Failed to allocate DMA memory for CPU-CP info packet\n");
		return -ENOMEM;
	}

	memset(cpucp_info_cpu_addr, 0, sizeof(struct cpucp_info));

	/* Tell the firmware where to write and how much room it has */
	pkt.ctl = cpu_to_le32(CPUCP_PACKET_INFO_GET <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.addr = cpu_to_le64(cpucp_info_dma_addr);
	pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_info));

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
					HL_CPUCP_INFO_TIMEOUT_USEC, &result);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to handle CPU-CP info pkt, error %d\n", rc);
		goto out;
	}

	/* Cache the firmware-provided info in the driver's properties */
	memcpy(&prop->cpucp_info, cpucp_info_cpu_addr,
			sizeof(prop->cpucp_info));

	rc = hl_build_hwmon_channel_info(hdev, prop->cpucp_info.sensors);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to build hwmon channel info, error %d\n", rc);
		/* Normalize the helper's error to -EFAULT for our callers */
		rc = -EFAULT;
		goto out;
	}

out:
	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
			sizeof(struct cpucp_info), cpucp_info_cpu_addr);

	return rc;
}
322*4882a593Smuzhiyun
/*
 * hl_fw_get_eeprom_data() - Read the device EEPROM contents via CPU-CP.
 *
 * @hdev: pointer to hl_device structure.
 * @data: destination buffer for the EEPROM contents.
 * @max_size: size of @data in bytes; also the cap given to the firmware.
 *
 * Return: 0 on success, -ENOMEM or a send error code on failure.
 */
int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
{
	struct cpucp_packet pkt = {};
	void *eeprom_info_cpu_addr;
	dma_addr_t eeprom_info_dma_addr;
	long result;
	int rc;

	/* Bounce buffer in CPU-accessible DMA memory for the firmware to fill */
	eeprom_info_cpu_addr =
			hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
					max_size, &eeprom_info_dma_addr);
	if (!eeprom_info_cpu_addr) {
		dev_err(hdev->dev,
			"Failed to allocate DMA memory for CPU-CP EEPROM packet\n");
		return -ENOMEM;
	}

	memset(eeprom_info_cpu_addr, 0, max_size);

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_EEPROM_DATA_GET <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.addr = cpu_to_le64(eeprom_info_dma_addr);
	pkt.data_max_size = cpu_to_le32(max_size);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
			HL_CPUCP_EEPROM_TIMEOUT_USEC, &result);

	if (rc) {
		dev_err(hdev->dev,
			"Failed to handle CPU-CP EEPROM packet, error %d\n",
			rc);
		goto out;
	}

	/* result contains the actual size; clamp to the caller's buffer */
	memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));

out:
	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
			eeprom_info_cpu_addr);

	return rc;
}
366*4882a593Smuzhiyun
hl_fw_cpucp_pci_counters_get(struct hl_device * hdev,struct hl_info_pci_counters * counters)367*4882a593Smuzhiyun int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
368*4882a593Smuzhiyun struct hl_info_pci_counters *counters)
369*4882a593Smuzhiyun {
370*4882a593Smuzhiyun struct cpucp_packet pkt = {};
371*4882a593Smuzhiyun long result;
372*4882a593Smuzhiyun int rc;
373*4882a593Smuzhiyun
374*4882a593Smuzhiyun pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
375*4882a593Smuzhiyun CPUCP_PKT_CTL_OPCODE_SHIFT);
376*4882a593Smuzhiyun
377*4882a593Smuzhiyun /* Fetch PCI rx counter */
378*4882a593Smuzhiyun pkt.index = cpu_to_le32(cpucp_pcie_throughput_rx);
379*4882a593Smuzhiyun rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
380*4882a593Smuzhiyun HL_CPUCP_INFO_TIMEOUT_USEC, &result);
381*4882a593Smuzhiyun if (rc) {
382*4882a593Smuzhiyun dev_err(hdev->dev,
383*4882a593Smuzhiyun "Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
384*4882a593Smuzhiyun return rc;
385*4882a593Smuzhiyun }
386*4882a593Smuzhiyun counters->rx_throughput = result;
387*4882a593Smuzhiyun
388*4882a593Smuzhiyun memset(&pkt, 0, sizeof(pkt));
389*4882a593Smuzhiyun pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
390*4882a593Smuzhiyun CPUCP_PKT_CTL_OPCODE_SHIFT);
391*4882a593Smuzhiyun
392*4882a593Smuzhiyun /* Fetch PCI tx counter */
393*4882a593Smuzhiyun pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
394*4882a593Smuzhiyun rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
395*4882a593Smuzhiyun HL_CPUCP_INFO_TIMEOUT_USEC, &result);
396*4882a593Smuzhiyun if (rc) {
397*4882a593Smuzhiyun dev_err(hdev->dev,
398*4882a593Smuzhiyun "Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
399*4882a593Smuzhiyun return rc;
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun counters->tx_throughput = result;
402*4882a593Smuzhiyun
403*4882a593Smuzhiyun /* Fetch PCI replay counter */
404*4882a593Smuzhiyun memset(&pkt, 0, sizeof(pkt));
405*4882a593Smuzhiyun pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
406*4882a593Smuzhiyun CPUCP_PKT_CTL_OPCODE_SHIFT);
407*4882a593Smuzhiyun
408*4882a593Smuzhiyun rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
409*4882a593Smuzhiyun HL_CPUCP_INFO_TIMEOUT_USEC, &result);
410*4882a593Smuzhiyun if (rc) {
411*4882a593Smuzhiyun dev_err(hdev->dev,
412*4882a593Smuzhiyun "Failed to handle CPU-CP PCI info pkt, error %d\n", rc);
413*4882a593Smuzhiyun return rc;
414*4882a593Smuzhiyun }
415*4882a593Smuzhiyun counters->replay_cnt = (u32) result;
416*4882a593Smuzhiyun
417*4882a593Smuzhiyun return rc;
418*4882a593Smuzhiyun }
419*4882a593Smuzhiyun
/*
 * hl_fw_cpucp_total_energy_get() - Read the device's total energy counter
 * from the CPU-CP.
 *
 * Return: 0 on success, non-zero on failure.
 */
int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
{
	struct cpucp_packet pkt = {
		.ctl = cpu_to_le32(CPUCP_PACKET_TOTAL_ENERGY_GET <<
					CPUCP_PKT_CTL_OPCODE_SHIFT),
	};
	long energy;
	int rc;

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
					HL_CPUCP_INFO_TIMEOUT_USEC, &energy);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to handle CpuCP total energy pkt, error %d\n",
			rc);
		return rc;
	}

	*total_energy = energy;

	return 0;
}
442*4882a593Smuzhiyun
/* Decode the boot error register and log each reported error/warning.
 *
 * @boot_err0_reg: address of the firmware's boot-error status register.
 */
static void fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg)
{
	u32 err_val;

	/* Some of the firmware status codes are deprecated in newer f/w
	 * versions. In those versions, the errors are reported
	 * in different registers. Therefore, we need to check those
	 * registers and print the exact errors. Moreover, there
	 * may be multiple errors, so we need to report on each error
	 * separately. Some of the error codes might indicate a state
	 * that is not an error per-se, but it is an error in production
	 * environment
	 */
	err_val = RREG32(boot_err0_reg);
	/* The ENABLED bit indicates the register contains valid data */
	if (!(err_val & CPU_BOOT_ERR0_ENABLED))
		return;

	if (err_val & CPU_BOOT_ERR0_DRAM_INIT_FAIL)
		dev_err(hdev->dev,
			"Device boot error - DRAM initialization failed\n");
	if (err_val & CPU_BOOT_ERR0_FIT_CORRUPTED)
		dev_err(hdev->dev, "Device boot error - FIT image corrupted\n");
	if (err_val & CPU_BOOT_ERR0_TS_INIT_FAIL)
		dev_err(hdev->dev,
			"Device boot error - Thermal Sensor initialization failed\n");
	if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED)
		dev_warn(hdev->dev,
			"Device boot warning - Skipped DRAM initialization\n");
	/* Logged with dev_warn, so say "warning" (was wrongly "error") */
	if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED)
		dev_warn(hdev->dev,
			"Device boot warning - Skipped waiting for BMC\n");
	if (err_val & CPU_BOOT_ERR0_NIC_DATA_NOT_RDY)
		dev_err(hdev->dev,
			"Device boot error - Serdes data from BMC not available\n");
	if (err_val & CPU_BOOT_ERR0_NIC_FW_FAIL)
		dev_err(hdev->dev,
			"Device boot error - NIC F/W initialization failed\n");
}
481*4882a593Smuzhiyun
/* Log a human-readable explanation for a failed CPU boot status code. */
static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
{
	/* Some of the status codes below are deprecated in newer f/w
	 * versions but we keep them here for backward compatibility
	 */
	if (status == CPU_BOOT_STATUS_NA)
		dev_err(hdev->dev,
			"Device boot error - BTL did NOT run\n");
	else if (status == CPU_BOOT_STATUS_IN_WFE)
		dev_err(hdev->dev,
			"Device boot error - Stuck inside WFE loop\n");
	else if (status == CPU_BOOT_STATUS_IN_BTL)
		dev_err(hdev->dev,
			"Device boot error - Stuck in BTL\n");
	else if (status == CPU_BOOT_STATUS_IN_PREBOOT)
		dev_err(hdev->dev,
			"Device boot error - Stuck in Preboot\n");
	else if (status == CPU_BOOT_STATUS_IN_SPL)
		dev_err(hdev->dev,
			"Device boot error - Stuck in SPL\n");
	else if (status == CPU_BOOT_STATUS_IN_UBOOT)
		dev_err(hdev->dev,
			"Device boot error - Stuck in u-boot\n");
	else if (status == CPU_BOOT_STATUS_DRAM_INIT_FAIL)
		dev_err(hdev->dev,
			"Device boot error - DRAM initialization failed\n");
	else if (status == CPU_BOOT_STATUS_UBOOT_NOT_READY)
		dev_err(hdev->dev,
			"Device boot error - u-boot stopped by user\n");
	else if (status == CPU_BOOT_STATUS_TS_INIT_FAIL)
		dev_err(hdev->dev,
			"Device boot error - Thermal Sensor initialization failed\n");
	else
		dev_err(hdev->dev,
			"Device boot error - Invalid status code %d\n",
			status);
}
531*4882a593Smuzhiyun
/*
 * hl_fw_read_preboot_ver() - Wait for preboot to settle and read its version.
 *
 * @hdev: pointer to hl_device structure.
 * @cpu_boot_status_reg: address of the CPU boot status register.
 * @boot_err0_reg: address of the boot error register (for diagnostics).
 * @timeout: poll timeout in usec.
 *
 * Return: 0 on success or when the CPU is not enabled, -EIO on timeout.
 */
int hl_fw_read_preboot_ver(struct hl_device *hdev, u32 cpu_boot_status_reg,
				u32 boot_err0_reg, u32 timeout)
{
	u32 status;
	int rc;

	/* Nothing to do when the embedded CPU is not in use */
	if (!hdev->cpu_enable)
		return 0;

	/* Need to check two possible scenarios:
	 *
	 * CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT - for newer firmwares where
	 * the preboot is waiting for the boot fit
	 *
	 * All other status values - for older firmwares where the uboot was
	 * loaded from the FLASH
	 */
	rc = hl_poll_timeout(
		hdev,
		cpu_boot_status_reg,
		status,
		(status == CPU_BOOT_STATUS_IN_UBOOT) ||
		(status == CPU_BOOT_STATUS_DRAM_RDY) ||
		(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
		(status == CPU_BOOT_STATUS_SRAM_AVAIL) ||
		(status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT),
		10000,
		timeout);

	if (rc) {
		dev_err(hdev->dev, "Failed to read preboot version\n");
		/* Explain what stage the CPU got stuck at, plus any errors */
		detect_cpu_boot_status(hdev, status);
		fw_read_errors(hdev, boot_err0_reg);
		return -EIO;
	}

	hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_PREBOOT);

	return 0;
}
573*4882a593Smuzhiyun
/*
 * hl_fw_init_cpu() - Drive the device CPU through its boot sequence.
 *
 * Sequence: optionally serve a boot-fit request, wait for the boot loader,
 * load the main firmware, optionally skip the BMC wait, then wait for the
 * firmware to report SRAM_AVAIL.
 *
 * @hdev: pointer to hl_device structure.
 * @cpu_boot_status_reg: address of the CPU boot status register.
 * @msg_to_cpu_reg: address of the host->CPU message register.
 * @cpu_msg_status_reg: address of the CPU->host message status register.
 * @boot_err0_reg: address of the boot error register (for diagnostics).
 * @skip_bmc: whether to instruct the CPU to skip waiting for the BMC.
 * @cpu_timeout: poll timeout in usec for CPU boot stages.
 * @boot_fit_timeout: poll timeout in usec for the boot-fit handshake.
 *
 * Return: 0 on success, -EIO or a load error code on failure.
 */
int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
			u32 msg_to_cpu_reg, u32 cpu_msg_status_reg,
			u32 boot_err0_reg, bool skip_bmc,
			u32 cpu_timeout, u32 boot_fit_timeout)
{
	u32 status;
	int rc;

	dev_info(hdev->dev, "Going to wait for device boot (up to %lds)\n",
		cpu_timeout / USEC_PER_SEC);

	/* Wait for boot FIT request */
	rc = hl_poll_timeout(
		hdev,
		cpu_boot_status_reg,
		status,
		status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT,
		10000,
		boot_fit_timeout);

	if (rc) {
		/* Older f/w boots from FLASH and never asks for a boot fit */
		dev_dbg(hdev->dev,
			"No boot fit request received, resuming boot\n");
	} else {
		rc = hdev->asic_funcs->load_boot_fit_to_device(hdev);
		if (rc)
			goto out;

		/* Clear device CPU message status */
		WREG32(cpu_msg_status_reg, CPU_MSG_CLR);

		/* Signal device CPU that boot loader is ready */
		WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);

		/* Poll for CPU device ack */
		rc = hl_poll_timeout(
			hdev,
			cpu_msg_status_reg,
			status,
			status == CPU_MSG_OK,
			10000,
			boot_fit_timeout);

		if (rc) {
			dev_err(hdev->dev,
				"Timeout waiting for boot fit load ack\n");
			goto out;
		}

		/* Clear message */
		WREG32(msg_to_cpu_reg, KMD_MSG_NA);
	}

	/* Make sure CPU boot-loader is running */
	rc = hl_poll_timeout(
		hdev,
		cpu_boot_status_reg,
		status,
		(status == CPU_BOOT_STATUS_DRAM_RDY) ||
		(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
		(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
		10000,
		cpu_timeout);

	/* Read U-Boot version now in case we will later fail */
	hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT);

	if (rc) {
		detect_cpu_boot_status(hdev, status);
		rc = -EIO;
		goto out;
	}

	/* Driver configured to stop after boot loader, no F/W load */
	if (!hdev->fw_loading) {
		dev_info(hdev->dev, "Skip loading FW\n");
		goto out;
	}

	/* SRAM_AVAIL means the firmware is already up; nothing left to do */
	if (status == CPU_BOOT_STATUS_SRAM_AVAIL)
		goto out;

	dev_info(hdev->dev,
		"Loading firmware to device, may take some time...\n");

	rc = hdev->asic_funcs->load_firmware_to_device(hdev);
	if (rc)
		goto out;

	if (skip_bmc) {
		/* Ask the CPU not to wait for the BMC, and wait for its ack */
		WREG32(msg_to_cpu_reg, KMD_MSG_SKIP_BMC);

		rc = hl_poll_timeout(
			hdev,
			cpu_boot_status_reg,
			status,
			(status == CPU_BOOT_STATUS_BMC_WAITING_SKIPPED),
			10000,
			cpu_timeout);

		if (rc) {
			dev_err(hdev->dev,
				"Failed to get ACK on skipping BMC, %d\n",
				status);
			WREG32(msg_to_cpu_reg, KMD_MSG_NA);
			rc = -EIO;
			goto out;
		}
	}

	/* Tell the CPU the firmware image is in place, then wait for boot */
	WREG32(msg_to_cpu_reg, KMD_MSG_FIT_RDY);

	rc = hl_poll_timeout(
		hdev,
		cpu_boot_status_reg,
		status,
		(status == CPU_BOOT_STATUS_SRAM_AVAIL),
		10000,
		cpu_timeout);

	/* Clear message */
	WREG32(msg_to_cpu_reg, KMD_MSG_NA);

	if (rc) {
		if (status == CPU_BOOT_STATUS_FIT_CORRUPTED)
			dev_err(hdev->dev,
				"Device reports FIT image is corrupted\n");
		else
			dev_err(hdev->dev,
				"Failed to load firmware to device, %d\n",
				status);

		rc = -EIO;
		goto out;
	}

	dev_info(hdev->dev, "Successfully loaded firmware to device\n");

out:
	/* Always dump any boot errors the firmware reported */
	fw_read_errors(hdev, boot_err0_reg);

	return rc;
}
717