// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
 */

#include "bmi.h"
#include "hif.h"
#include "debug.h"
#include "htc.h"
#include "hw.h"

void ath10k_bmi_start(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");

	ar->bmi.done_sent = false;
}
EXPORT_SYMBOL(ath10k_bmi_start);

int ath10k_bmi_done(struct ath10k *ar)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");

	if (ar->bmi.done_sent) {
		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
		return 0;
	}

	ar->bmi.done_sent = true;
	cmd.id = __cpu_to_le32(BMI_DONE);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to write to the device: %d\n", ret);
		return ret;
	}

	return 0;
}
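
/*
 * Illustrative sketch only (editor addition, not part of the original
 * driver): the helpers in this file are meant to be used as a short BMI
 * session bracketed by ath10k_bmi_start() and ath10k_bmi_done(). The caller
 * and the register address below are hypothetical placeholders; real call
 * sites live elsewhere in ath10k.
 */
#if 0
static int example_bmi_session(struct ath10k *ar)
{
	u32 chip_id;
	int ret;

	ath10k_bmi_start(ar);

	/* any BMI command may run here; the address is a placeholder */
	ret = ath10k_bmi_read_soc_reg(ar, 0x000000ec, &chip_id);
	if (ret)
		return ret;

	/* after BMI_DONE every helper in this file returns -EBUSY */
	return ath10k_bmi_done(ar);
}
#endif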

int ath10k_bmi_get_target_info(struct ath10k *ar,
			       struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen = sizeof(resp.get_target_info);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to get target info from device\n");
		return ret;
	}

	if (resplen < sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
			    resplen);
		return -EIO;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}

#define TARGET_VERSION_SENTINAL 0xffffffffu

int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
				    struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen, ver_len;
	__le32 tmp;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info SDIO\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	/* Step 1: Read 4 bytes of the target info and check if it is
	 * the special sentinel version word or the first word in the
	 * version response.
	 */
	resplen = sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &tmp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	/* Some SDIO boards have a special sentinel word before the real
	 * version response.
	 */
	if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
		/* Step 1b: Read the version length */
		resplen = sizeof(u32);
		ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0, &tmp,
						  &resplen);
		if (ret) {
			ath10k_warn(ar, "unable to read from device\n");
			return ret;
		}
	}

	ver_len = __le32_to_cpu(tmp);

	/* Step 2: Check the target info length */
	if (ver_len != sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "Unexpected target info len: %u. Expected: %zu\n",
			    ver_len, sizeof(resp.get_target_info));
		return -EINVAL;
	}

	/* Step 3: Read the rest of the version response */
	resplen = sizeof(resp.get_target_info) - sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0,
					  &resp.get_target_info.version,
					  &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}
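
/*
 * Editor's note (not from the original source): the SDIO target can answer
 * BMI_GET_TARGET_INFO in two layouts, which is what the sentinel handling
 * above deals with. A rough sketch of the two wire formats, assuming the
 * response structure in bmi.h begins with a length word (implied by the way
 * the remainder is read into &resp.get_target_info.version):
 *
 *   without sentinel:  [ length | version | type | ... ]
 *   with sentinel:     [ 0xffffffff | length | version | type | ... ]
 *
 * In both cases the first non-sentinel word is taken as the response length
 * and validated against sizeof(resp.get_target_info) before the rest is read.
 */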

int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 rxlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len = __cpu_to_le32(rxlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &rxlen);
		if (ret) {
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    ret);
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, rxlen);
		address += rxlen;
		buffer += rxlen;
		length -= rxlen;
	}

	return 0;
}
EXPORT_SYMBOL(ath10k_bmi_read_memory);
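
/*
 * Illustrative sketch only (editor addition): a typical use of the chunked
 * read above to pull a small region of target memory. The function name,
 * address and size are hypothetical placeholders.
 */
#if 0
static int example_dump_target_mem(struct ath10k *ar)
{
	u8 buf[256];

	/* the read is split internally into BMI_MAX_DATA_SIZE chunks */
	return ath10k_bmi_read_memory(ar, 0x00400000, buf, sizeof(buf));
}
#endif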

int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.write_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi write soc register 0x%08x val 0x%08x\n",
		   address, reg_val);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi write soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER);
	cmd.write_soc_reg.addr = __cpu_to_le32(address);
	cmd.write_soc_reg.value = __cpu_to_le32(reg_val);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "Unable to write soc register to device: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg);
	u32 resplen = sizeof(resp.read_soc_reg);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi read soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER);
	cmd.read_soc_reg.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "Unable to read soc register from device: %d\n",
			    ret);
		return ret;
	}

	*reg_val = __le32_to_cpu(resp.read_soc_reg.value);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n",
		   *reg_val);

	return 0;
}
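
/*
 * Illustrative sketch only (editor addition): a read-modify-write of a SoC
 * register built from the two helpers above. The function name and its
 * parameters are hypothetical placeholders.
 */
#if 0
static int example_set_soc_reg_bits(struct ath10k *ar, u32 addr, u32 bits)
{
	u32 val;
	int ret;

	ret = ath10k_bmi_read_soc_reg(ar, addr, &val);
	if (ret)
		return ret;

	return ath10k_bmi_write_soc_reg(ar, addr, val | bits);
}
#endif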

int ath10k_bmi_write_memory(struct ath10k *ar,
			    u32 address, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* copy before roundup to avoid reading beyond buffer */
		memcpy(cmd.write_mem.payload, buffer, txlen);
		txlen = roundup(txlen, 4);

		cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY);
		cmd.write_mem.addr = __cpu_to_le32(address);
		cmd.write_mem.len = __cpu_to_le32(txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device (%d)\n",
				    ret);
			return ret;
		}

		/* fixup roundup() so `length` zeroes out for last chunk */
		txlen = min(txlen, length);

		address += txlen;
		buffer += txlen;
		length -= txlen;
	}

	return 0;
}
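
/*
 * Editor's note on the chunking arithmetic above: the payload sent on the
 * wire is padded to a 4-byte multiple, but the host-side pointers must only
 * advance by the bytes actually consumed from `buffer`. Worked example for a
 * final chunk of length 10: memcpy() copies 10 bytes, roundup() makes the
 * transfer 12 bytes long, and min(txlen, length) restores txlen to 10 so the
 * loop ends with length == 0 instead of underflowing.
 */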

int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
		   address, param);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn(ar, "invalid execute response length (%d)\n",
			    resplen);
		return -EIO;
	}

	*result = __le32_to_cpu(resp.execute.result);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);

	return 0;
}
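
/*
 * Illustrative sketch only (editor addition): BMI_EXECUTE calls a routine on
 * the target at `address` with one 32-bit parameter and returns its 32-bit
 * result. The entry point and parameter below are hypothetical placeholders.
 */
#if 0
static int example_call_target_routine(struct ath10k *ar)
{
	u32 result;
	int ret;

	ret = ath10k_bmi_execute(ar, 0x00400400 /* placeholder */, 1, &result);
	if (ret)
		return ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "example execute result 0x%x\n", result);
	return 0;
}
#endif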

static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd *cmd;
	u32 hdrlen = sizeof(cmd->id) + sizeof(cmd->lz_data);
	u32 txlen;
	int ret;
	size_t buf_len;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	buf_len = sizeof(*cmd) + BMI_MAX_LARGE_DATA_SIZE - BMI_MAX_DATA_SIZE;
	cmd = kzalloc(buf_len, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	while (length) {
		txlen = min(length, BMI_MAX_LARGE_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd->id = __cpu_to_le32(BMI_LZ_DATA);
		cmd->lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd->lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			kfree(cmd);
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	kfree(cmd);

	return 0;
}
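
/*
 * Editor's note (not from the original source): unlike ath10k_bmi_lz_data()
 * below, the large variant cannot keep its command on the stack because the
 * payload area must hold BMI_MAX_LARGE_DATA_SIZE rather than
 * BMI_MAX_DATA_SIZE, hence the kzalloc() of sizeof(*cmd) plus the size
 * difference. It is only selected on hardware that sets
 * bmi_large_size_download (see ath10k_bmi_fast_download() below).
 */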

int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	u32 txlen;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		WARN_ON_ONCE(txlen & 3);

		cmd.id = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd.lz_data.payload, buffer, txlen);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			return ret;
		}

		buffer += txlen;
		length -= txlen;
	}

	return 0;
}

int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
	cmd.lz_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to start LZ stream to the device\n");
		return ret;
	}

	return 0;
}

int ath10k_bmi_fast_download(struct ath10k *ar,
			     u32 address, const void *buffer, u32 length)
{
	u8 trailer[4] = {};
	u32 head_len = rounddown(length, 4);
	u32 trailer_len = length - head_len;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi fast download address 0x%x buffer 0x%pK length %d\n",
		   address, buffer, length);

	ret = ath10k_bmi_lz_stream_start(ar, address);
	if (ret)
		return ret;

	/* copy the last word into a zero padded buffer */
	if (trailer_len > 0)
		memcpy(trailer, buffer + head_len, trailer_len);

	if (ar->hw_params.bmi_large_size_download)
		ret = ath10k_bmi_lz_data_large(ar, buffer, head_len);
	else
		ret = ath10k_bmi_lz_data(ar, buffer, head_len);

	if (ret)
		return ret;

	if (trailer_len > 0)
		ret = ath10k_bmi_lz_data(ar, trailer, 4);

	if (ret != 0)
		return ret;

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	ret = ath10k_bmi_lz_stream_start(ar, 0x00);

	return ret;
}
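
/*
 * Editor's note (not from the original source): the LZ download path expects
 * 4-byte-aligned transfers (see the WARN_ON_ONCE(txlen & 3) checks above), so
 * ath10k_bmi_fast_download() splits the image into an aligned head and a
 * zero-padded trailer. Worked example for length == 4099: head_len becomes
 * 4096, trailer_len becomes 3, the last 3 bytes are copied into the zeroed
 * 4-byte trailer, the trailer is sent as one final 4-byte LZ chunk, and the
 * stream is then restarted at address 0 to flush the target caches.
 */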

int ath10k_bmi_set_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.set_app_start);
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi set start command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_SET_APP_START);
	cmd.set_app_start.addr = __cpu_to_le32(address);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (ret) {
		ath10k_warn(ar, "unable to set start to the device: %d\n", ret);
		return ret;
	}

	return 0;
}
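
/*
 * Illustrative sketch only (editor addition): a typical tail of a firmware
 * download, where the application start address is recorded before BMI is
 * closed and the target begins executing. The function name and entry
 * address parameter are hypothetical placeholders.
 */
#if 0
static int example_finish_boot(struct ath10k *ar, u32 fw_entry_addr)
{
	int ret;

	ret = ath10k_bmi_set_start(ar, fw_entry_addr);
	if (ret)
		return ret;

	/* BMI_DONE ends the session and lets the target run from the
	 * configured start address.
	 */
	return ath10k_bmi_done(ar);
}
#endif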