// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_endpoint.h"
#include "ipa_table.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"

/**
 * DOC: IPA Immediate Commands
 *
 * The AP command TX endpoint is used to issue immediate commands to the IPA.
 * An immediate command is generally used to request the IPA do something
 * other than data transfer to another endpoint.
 *
 * Immediate commands are represented by GSI transactions just like other
 * transfer requests, each represented by a single GSI TRE.  Each immediate
 * command has a well-defined format, having a payload of a known length.
 * This allows the transfer element's length field to be used to hold an
 * immediate command's opcode.  The payload for a command resides in DRAM
 * and is described by a single scatterlist entry in its transaction.
 * Commands do not require a transaction completion callback.  To commit
 * an immediate command transaction, either gsi_trans_commit_wait() or
 * gsi_trans_commit_wait_timeout() is used.
 */
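
/* A minimal usage sketch (illustrative only, not taken from a caller in
 * the driver): allocate a command transaction, add one or more immediate
 * commands to it, then commit it and wait for completion.  Error
 * handling is elided.
 *
 *	struct gsi_trans *trans;
 *
 *	trans = ipa_cmd_trans_alloc(ipa, 1);
 *	if (trans) {
 *		ipa_cmd_register_write_add(trans, offset, value, ~0, false);
 *		gsi_trans_commit_wait(trans);
 *	}
 */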

/* Some commands can wait until indicated pipeline stages are clear */
enum pipeline_clear_options {
	pipeline_clear_hps	= 0,
	pipeline_clear_src_grp	= 1,
	pipeline_clear_full	= 2,
};

/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */

struct ipa_cmd_hw_ip_fltrt_init {
	__le64 hash_rules_addr;
	__le64 flags;
	__le64 nhash_rules_addr;
};

/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK		GENMASK_ULL(11, 0)
#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK		GENMASK_ULL(27, 12)
#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK		GENMASK_ULL(39, 28)
#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK		GENMASK_ULL(55, 40)
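
/* As an illustrative example (values chosen arbitrarily, not actual
 * driver configuration), a non-hashed table of size 0x0040 at IPA-local
 * offset 0x0290 would be recorded in the flags word as:
 *
 *	val = u64_encode_bits(0x0290, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK) |
 *	      u64_encode_bits(0x0040, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
 */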

/* IPA_CMD_HDR_INIT_LOCAL */

struct ipa_cmd_hw_hdr_init_local {
	__le64 hdr_table_addr;
	__le32 flags;
	__le32 reserved;
};

/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK	GENMASK(11, 0)
#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK	GENMASK(27, 12)
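
/* For instance (illustrative values only), a 0x0200-byte header area at
 * IPA-local offset 0x07d0 would be encoded as:
 *
 *	flags = u32_encode_bits(0x0200, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK) |
 *		u32_encode_bits(0x07d0, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
 */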

/* IPA_CMD_REGISTER_WRITE */

/* For IPA v4.0+, this opcode gets modified with pipeline clear options */

#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

struct ipa_cmd_register_write {
	__le16 flags;		/* Unused/reserved for IPA v3.5.1 */
	__le16 offset;
	__le32 value;
	__le32 value_mask;
	__le32 clear_options;	/* Unused/reserved for IPA v4.0+ */
};

/* Field masks for ipa_cmd_register_write structure fields */
/* The next field is present for IPA v4.0 and above */
#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK		GENMASK(14, 11)
/* The next field is present for IPA v3.5.1 only */
#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK		GENMASK(15, 15)

/* The next field and its values are present for IPA v3.5.1 only */
#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK		GENMASK(1, 0)

/* IPA_CMD_IP_PACKET_INIT */

struct ipa_cmd_ip_packet_init {
	u8 dest_endpoint;
	u8 reserved[7];
};

/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK		GENMASK(4, 0)

/* IPA_CMD_DMA_SHARED_MEM */

/* For IPA v4.0+, this opcode gets modified with pipeline clear options */

#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

struct ipa_cmd_hw_dma_mem_mem {
	__le16 clear_after_read;	/* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
	__le16 size;
	__le16 local_addr;
	__le16 flags;
	__le64 system_addr;
};

/* Flag allowing atomic clear of target region after reading data (v4.0+) */
#define DMA_SHARED_MEM_CLEAR_AFTER_READ			GENMASK(15, 15)

/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK		GENMASK(0, 0)
/* The next two fields are present for IPA v3.5.1 only. */
#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK		GENMASK(1, 1)
#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK	GENMASK(3, 2)

/* IPA_CMD_IP_PACKET_TAG_STATUS */

struct ipa_cmd_ip_packet_tag_status {
	__le64 tag;
};

#define IP_PACKET_TAG_STATUS_TAG_FMASK			GENMASK_ULL(63, 16)

/* Immediate command payload */
union ipa_cmd_payload {
	struct ipa_cmd_hw_ip_fltrt_init table_init;
	struct ipa_cmd_hw_hdr_init_local hdr_init_local;
	struct ipa_cmd_register_write register_write;
	struct ipa_cmd_ip_packet_init ip_packet_init;
	struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
	struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
};

static void ipa_cmd_validate_build(void)
{
	/* The sizes of filter and route tables need to fit into fields
	 * in the ipa_cmd_hw_ip_fltrt_init structure.  Although hashed
	 * tables might not be used, non-hashed and hashed tables have
	 * the same maximum size.  IPv4 and IPv6 filter tables have the
	 * same number of entries, as do IPv4 and IPv6 route tables.
	 */
#define TABLE_SIZE	(TABLE_COUNT_MAX * sizeof(__le64))
#define TABLE_COUNT_MAX	max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
	BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
	BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
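	/* As a worked example (counts are illustrative, not the actual
	 * driver limits): if the larger table count were 15, TABLE_SIZE
	 * would be 15 * 8 = 120 bytes, well below the 0xfff maximum
	 * value of the 12-bit size fields checked above.
	 */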
#undef TABLE_COUNT_MAX
#undef TABLE_SIZE
}

#ifdef IPA_VALIDATE

/* Validate a memory region holding a table */
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
			 bool route, bool ipv6, bool hashed)
{
	struct device *dev = &ipa->pdev->dev;
	u32 offset_max;

	offset_max = hashed ? field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK)
			    : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	if (mem->offset > offset_max ||
	    ipa->mem_offset > offset_max - mem->offset) {
		dev_err(dev, "IPv%c %s%s table region offset too large\n",
			ipv6 ? '6' : '4', hashed ? "hashed " : "",
			route ? "route" : "filter");
		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
			ipa->mem_offset, mem->offset, offset_max);

		return false;
	}

	if (mem->offset > ipa->mem_size ||
	    mem->size > ipa->mem_size - mem->offset) {
		dev_err(dev, "IPv%c %s%s table region out of range\n",
			ipv6 ? '6' : '4', hashed ? "hashed " : "",
			route ? "route" : "filter");
		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
			mem->offset, mem->size, ipa->mem_size);

		return false;
	}

	return true;
}

/* Validate the memory region that holds headers */
static bool ipa_cmd_header_valid(struct ipa *ipa)
{
	const struct ipa_mem *mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
	struct device *dev = &ipa->pdev->dev;
	u32 offset_max;
	u32 size_max;
	u32 size;

	/* In ipa_cmd_hdr_init_local_add() we record the offset and size
	 * of the header table memory area.  Make sure the offset and size
	 * fit in the fields that need to hold them, and that the entire
	 * range is within the overall IPA memory range.
	 */
	offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	if (mem->offset > offset_max ||
	    ipa->mem_offset > offset_max - mem->offset) {
		dev_err(dev, "header table region offset too large\n");
		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
			ipa->mem_offset, mem->offset, offset_max);

		return false;
	}

	size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
	size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
	size += ipa->mem[IPA_MEM_AP_HEADER].size;

	if (size > size_max) {
		dev_err(dev, "header table region size too large\n");
		dev_err(dev, "    (0x%04x > 0x%08x)\n", size, size_max);

		return false;
	}
	if (size > ipa->mem_size || mem->offset > ipa->mem_size - size) {
		dev_err(dev, "header table region out of range\n");
		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
			mem->offset, size, ipa->mem_size);

		return false;
	}

	return true;
}

/* Indicate whether an offset can be used with a register_write command */
static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
						const char *name, u32 offset)
{
	struct ipa_cmd_register_write *payload;
	struct device *dev = &ipa->pdev->dev;
	u32 offset_max;
	u32 bit_count;
	/* The maximum offset in a register_write immediate command depends
	 * on the version of IPA.  IPA v3.5.1 supports a 16-bit offset, but
	 * newer versions allow some additional high-order bits.
	 */
	bit_count = BITS_PER_BYTE * sizeof(payload->offset);
	if (ipa->version != IPA_VERSION_3_5_1)
		bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
	BUILD_BUG_ON(bit_count > 32);
	offset_max = ~0U >> (32 - bit_count);
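	/* Worked out, bit_count is 16 + 4 = 20 for IPA v4.0 and later
	 * (so offset_max is 0xfffff), and 16 for IPA v3.5.1 (so
	 * offset_max is 0xffff).
	 */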

	/* Make sure the offset can be represented by the field(s)
	 * that holds it.  Also make sure the offset is not outside
	 * the overall IPA memory range.
	 */
	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
		dev_err(dev, "%s offset too large (0x%04x + 0x%04x > 0x%04x)\n",
			name, ipa->mem_offset, offset, offset_max);
		return false;
	}

	return true;
}

/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
	const char *name;
	u32 offset;

	/* If hashed tables are supported, ensure the hash flush register
	 * offset will fit in a register write IPA immediate command.
	 */
	if (ipa->version != IPA_VERSION_4_2) {
		offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
		name = "filter/route hash flush";
		if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
			return false;
	}

	/* Each endpoint can have a status endpoint associated with it,
	 * and this is recorded in an endpoint register.  If the modem
	 * crashes, we reset the status endpoint for all modem endpoints
	 * using a register write IPA immediate command.  Make sure the
	 * worst case (highest endpoint number) offset of that endpoint
	 * fits in the register write command field(s) that must hold it.
	 */
	offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT - 1);
	name = "maximal endpoint status";
	if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
		return false;

	return true;
}

bool ipa_cmd_data_valid(struct ipa *ipa)
{
	if (!ipa_cmd_header_valid(ipa))
		return false;

	if (!ipa_cmd_register_write_valid(ipa))
		return false;

	return true;
}

#endif /* IPA_VALIDATE */

int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;
	int ret;

	/* This is as good a place as any to validate build constants */
	ipa_cmd_validate_build();

	/* Even though command payloads are allocated one at a time,
	 * a single transaction can require up to tlv_count of them,
	 * so we treat them as if that many can be allocated at once.
	 */
	ret = gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
				      sizeof(union ipa_cmd_payload),
				      tre_max, channel->tlv_count);
	if (ret)
		return ret;

	/* Each TRE needs a command info structure */
	ret = gsi_trans_pool_init(&trans_info->info_pool,
				  sizeof(struct ipa_cmd_info),
				  tre_max, channel->tlv_count);
	if (ret)
		gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);

	return ret;
}
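
/* A minimal setup/teardown sketch (illustrative only; a real caller
 * derives tre_max from the channel configuration):
 *
 *	if (!ipa_cmd_pool_init(channel, tre_max)) {
 *		...issue immediate commands on the channel...
 *		ipa_cmd_pool_exit(channel);
 *	}
 */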

void ipa_cmd_pool_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;

	gsi_trans_pool_exit(&trans_info->info_pool);
	gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
}

static union ipa_cmd_payload *
ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
{
	struct gsi_trans_info *trans_info;
	struct ipa_endpoint *endpoint;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;

	return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
}

/* If hash_size is 0, hash_offset and hash_addr are ignored. */
void ipa_cmd_table_init_add(struct gsi_trans *trans,
			    enum ipa_cmd_opcode opcode, u16 size, u32 offset,
			    dma_addr_t addr, u16 hash_size, u32 hash_offset,
			    dma_addr_t hash_addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum dma_data_direction direction = DMA_TO_DEVICE;
	struct ipa_cmd_hw_ip_fltrt_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u64 val;

	/* Record the non-hash table offset and size */
	offset += ipa->mem_offset;
	val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);

	/* The hash table offset and address are zero if its size is 0 */
	if (hash_size) {
		/* Record the hash table offset and size */
		hash_offset += ipa->mem_offset;
		val |= u64_encode_bits(hash_offset,
				       IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
		val |= u64_encode_bits(hash_size,
				       IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->table_init;

	/* Fill in all offsets and sizes and the non-hash table address */
	if (hash_size)
		payload->hash_rules_addr = cpu_to_le64(hash_addr);
	payload->flags = cpu_to_le64(val);
	payload->nhash_rules_addr = cpu_to_le64(addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}

/* Initialize header space in IPA-local memory */
void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
	enum dma_data_direction direction = DMA_TO_DEVICE;
	struct ipa_cmd_hw_hdr_init_local *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u32 flags;

	offset += ipa->mem_offset;

	/* With this command we tell the IPA where in its local memory the
	 * header tables reside.  The content of the buffer provided is
	 * also written via DMA into that space.  The IPA hardware owns
	 * the table, but the AP must initialize it.
	 */
	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->hdr_init_local;

	payload->hdr_table_addr = cpu_to_le64(addr);
	flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
	flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	payload->flags = cpu_to_le32(flags);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}

void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
				u32 mask, bool clear_full)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_cmd_register_write *payload;
	union ipa_cmd_payload *cmd_payload;
	u32 opcode = IPA_CMD_REGISTER_WRITE;
	dma_addr_t payload_addr;
	u32 clear_option;
	u32 options;
	u16 flags;

	/* pipeline_clear_src_grp is not used */
	clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;

	if (ipa->version != IPA_VERSION_3_5_1) {
		u16 offset_high;
		u32 val;

		/* The opcode encodes pipeline clear options; SKIP_CLEAR
		 * is always 0 (don't skip the pipeline clear).
		 */
		val = u16_encode_bits(clear_option,
				      REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
		opcode |= val;

		/* Extract the high 4 bits from the offset */
		offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
		offset &= (1 << 16) - 1;

		/* Encode those high bits into the flags field */
		flags = u16_encode_bits(offset_high,
					REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
		options = 0;	/* reserved */

	} else {
		flags = 0;	/* SKIP_CLEAR flag is always 0 */
		options = u16_encode_bits(clear_option,
					  REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->register_write;

	payload->flags = cpu_to_le16(flags);
	payload->offset = cpu_to_le16((u16)offset);
	payload->value = cpu_to_le32(value);
	payload->value_mask = cpu_to_le32(mask);
	payload->clear_options = cpu_to_le32(options);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  DMA_NONE, opcode);
}
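
/* To illustrate the opcode modification above (per the field masks
 * defined earlier): with clear_full set on IPA v4.0+, the opcode sent
 * to the hardware becomes
 *
 *	IPA_CMD_REGISTER_WRITE |
 *	u16_encode_bits(pipeline_clear_full,
 *			REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK)
 *
 * that is, the base opcode with bits 10:9 holding the clear option.
 */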

/* Skip IP packet processing on the next data transfer on a TX channel */
static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
	enum dma_data_direction direction = DMA_TO_DEVICE;
	struct ipa_cmd_ip_packet_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	/* assert(endpoint_id <
		  field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK)); */

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_init;

	payload->dest_endpoint = u8_encode_bits(endpoint_id,
						IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}

/* Use a DMA command to read or write a block of IPA-resident memory */
void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr, bool toward_ipa)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
	struct ipa_cmd_hw_dma_mem_mem *payload;
	union ipa_cmd_payload *cmd_payload;
	enum dma_data_direction direction;
	dma_addr_t payload_addr;
	u16 flags;

	/* size and offset must fit in 16-bit fields */
	/* assert(size > 0 && size <= U16_MAX); */
	/* assert(offset <= U16_MAX && ipa->mem_offset <= U16_MAX - offset); */

	offset += ipa->mem_offset;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->dma_shared_mem;

	/* payload->clear_after_read was reserved prior to IPA v4.0.  It's
	 * never needed for current code, so it's 0 regardless of version.
	 */
	payload->size = cpu_to_le16(size);
	payload->local_addr = cpu_to_le16(offset);
	/* payload->flags:
	 *   direction: 0 = write to IPA, 1 = read from IPA
	 * Starting at v4.0 these are reserved; either way, all zero:
	 *   pipeline clear: 0 = wait for pipeline clear (don't skip)
	 *   clear_options: 0 = pipeline_clear_hps
	 * Instead, for v4.0+ these are encoded in the opcode.  But again
	 * since both values are 0 we won't bother OR'ing them in.
	 */
	flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
	payload->flags = cpu_to_le16(flags);
	payload->system_addr = cpu_to_le64(addr);

	direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}
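
/* An illustrative call (not taken from the driver): reading "size"
 * bytes of IPA-local memory at "offset" into a DMA buffer at "addr":
 *
 *	ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, false);
 *	gsi_trans_commit_wait(trans);
 */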

static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans, u64 tag)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
	enum dma_data_direction direction = DMA_TO_DEVICE;
	struct ipa_cmd_ip_packet_tag_status *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	/* assert(tag <= field_max(IP_PACKET_TAG_STATUS_TAG_FMASK)); */

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_tag_status;

	/* The tag field is little-endian, so use le64_encode_bits() */
	payload->tag = le64_encode_bits(tag, IP_PACKET_TAG_STATUS_TAG_FMASK);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}

/* Issue a small command TX data transfer */
static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum dma_data_direction direction = DMA_TO_DEVICE;
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	union ipa_cmd_payload *payload;
	dma_addr_t payload_addr;

	/* assert(size <= sizeof(*payload)); */

	/* Just transfer a zero-filled payload structure */
	payload = ipa_cmd_payload_alloc(ipa, &payload_addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  direction, opcode);
}

void ipa_cmd_tag_process_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_endpoint *endpoint;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];

	ipa_cmd_register_write_add(trans, 0, 0, 0, true);
	ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
	ipa_cmd_ip_tag_status_add(trans, 0xcba987654321);
	ipa_cmd_transfer_add(trans, 4);
}
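
/* The four commands added above must stay in sync with the count
 * returned by ipa_cmd_tag_process_count() below.
 */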

/* Returns the number of commands required for the tag process */
u32 ipa_cmd_tag_process_count(void)
{
	return 4;
}

void ipa_cmd_tag_process(struct ipa *ipa)
{
	u32 count = ipa_cmd_tag_process_count();
	struct gsi_trans *trans;

	trans = ipa_cmd_trans_alloc(ipa, count);
	if (trans) {
		ipa_cmd_tag_process_add(trans);
		gsi_trans_commit_wait(trans);
	} else {
		dev_err(&ipa->pdev->dev,
			"error allocating %u entry tag transaction\n", count);
	}
}

static struct ipa_cmd_info *
ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
{
	struct gsi_channel *channel;

	channel = &endpoint->ipa->gsi.channel[endpoint->channel_id];

	return gsi_trans_pool_alloc(&channel->trans_info.info_pool, tre_count);
}

/* Allocate a transaction for the command TX endpoint */
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
{
	struct ipa_endpoint *endpoint;
	struct gsi_trans *trans;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];

	trans = gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
					tre_count, DMA_NONE);
	if (trans)
		trans->info = ipa_cmd_info_alloc(endpoint, tre_count);

	return trans;
}