// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include "ena_com.h"

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32


#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))
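
/*
 * Example (a hedged sketch, mirroring what ena_com_validate_version() does
 * elsewhere in this file): read the controller version register and check it
 * against MIN_ENA_CTRL_VER. The register offset and masks are assumed to come
 * from ena_regs_defs.h.
 *
 *	u32 ctrl_ver = ena_com_reg_bar_read32(ena_dev,
 *					      ENA_REGS_CONTROLLER_VERSION_OFF);
 *	u32 ctrl_ver_masked =
 *		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
 *		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
 *		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
 *
 *	// Validate the controller version against the minimum supported one
 *	if (ctrl_ver_masked < MIN_ENA_CTRL_VER)
 *		return -1;
 */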

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4

#define ENA_REGS_ADMIN_INTR_MASK 1

#define ENA_MIN_ADMIN_POLL_US 100

#define ENA_MAX_ADMIN_POLL_US 5000

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	struct completion wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};

static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				struct ena_common_mem_addr *ena_addr,
				dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
74*4882a593Smuzhiyun pr_err("DMA address has more bits that the device supports\n");
		return -EINVAL;
	}

	ena_addr->mem_addr_low = lower_32_bits(addr);
	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

	return 0;
}

static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
{
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);

	sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
					 &sq->dma_addr, GFP_KERNEL);

	if (!sq->entries) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
{
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);

	cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
					 &cq->dma_addr, GFP_KERNEL);

	if (!cq->entries) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}

static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
					   &aenq->dma_addr, GFP_KERNEL);

	if (!aenq->entries) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		pr_err("AENQ handlers pointer is NULL\n");
		return -EINVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}

static void comp_ctxt_release(struct ena_com_admin_queue *queue,
			      struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	atomic_dec(&queue->outstanding_cmds);
}

static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= admin_queue->q_depth)) {
		pr_err("Command id is larger than the queue size. cmd_id: %u queue size %d\n",
		       command_id, admin_queue->q_depth);
		return NULL;
	}

	if (unlikely(!admin_queue->comp_ctx)) {
		pr_err("Completion context is NULL\n");
		return NULL;
	}

	if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
		pr_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		atomic_inc(&admin_queue->outstanding_cmds);
		admin_queue->comp_ctx[command_id].occupied = true;
	}

	return &admin_queue->comp_ctx[command_id];
}

static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		pr_debug("Admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(-ENOSPC);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(-EINVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	reinit_completion(&comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);

	return comp_ctx;
}

static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
{
	size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	admin_queue->comp_ctx =
		devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
	if (unlikely(!admin_queue->comp_ctx)) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);
	}

	return 0;
}

static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags = 0;
	struct ena_comp_ctx *comp_ctx;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		return ERR_PTR(-ENODEV);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (IS_ERR(comp_ctx))
		admin_queue->running_state = false;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);

	return comp_ctx;
}

static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
			sizeof(struct ena_eth_io_tx_desc) :
			sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			dma_alloc_coherent(ena_dev->dmadev, size,
					   &io_sq->desc_addr.phys_addr,
					   GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				dma_alloc_coherent(ena_dev->dmadev, size,
						   &io_sq->desc_addr.phys_addr,
						   GFP_KERNEL);
		}

		if (!io_sq->desc_addr.virt_addr) {
			pr_err("Memory allocation failed\n");
			return -ENOMEM;
		}
	}

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Allocate bounce buffers */
		io_sq->bounce_buf_ctrl.buffer_size =
			ena_dev->llq_info.desc_list_entry_size;
		io_sq->bounce_buf_ctrl.buffers_num =
			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
		io_sq->bounce_buf_ctrl.next_to_use = 0;

		size = io_sq->bounce_buf_ctrl.buffer_size *
		       io_sq->bounce_buf_ctrl.buffers_num;

		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->bounce_buf_ctrl.base_buffer =
			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->bounce_buf_ctrl.base_buffer)
			io_sq->bounce_buf_ctrl.base_buffer =
				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);

		if (!io_sq->bounce_buf_ctrl.base_buffer) {
			pr_err("Bounce buffer memory allocation failed\n");
			return -ENOMEM;
		}

		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
		       sizeof(io_sq->llq_info));

		/* Initiate the first bounce buffer */
		io_sq->llq_buf_ctrl.curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, io_sq->llq_info.desc_list_entry_size);
		io_sq->llq_buf_ctrl.descs_left_in_line =
			io_sq->llq_info.descs_num_before_header;
		io_sq->disable_meta_caching =
			io_sq->llq_info.disable_meta_caching;

		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
			io_sq->entries_in_tx_burst_left =
				io_sq->llq_info.max_entries_in_tx_burst;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}

static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
			sizeof(struct ena_eth_io_tx_cdesc) :
			sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

	prev_node = dev_to_node(ena_dev->dmadev);
	set_dev_node(ena_dev->dmadev, ctx->numa_node);
	io_cq->cdesc_addr.virt_addr =
		dma_alloc_coherent(ena_dev->dmadev, size,
				   &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
	set_dev_node(ena_dev->dmadev, prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		io_cq->cdesc_addr.virt_addr =
			dma_alloc_coherent(ena_dev->dmadev, size,
					   &io_cq->cdesc_addr.phys_addr,
					   GFP_KERNEL);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}

static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		complete(&comp_ctx->wait_event);
}

static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}
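
/*
 * Illustrative sketch (an assumption about the caller, not a definition in
 * this section): the MSI-X management interrupt path is expected to drive the
 * reaper above while holding the admin queue lock, roughly:
 *
 *	unsigned long flags = 0;
 *
 *	spin_lock_irqsave(&admin_queue->q_lock, flags);
 *	ena_com_handle_admin_completion(admin_queue);
 *	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
 *
 * This is what lets complete(&comp_ctx->wait_event) wake the submitter in
 * interrupt (non-polling) mode.
 */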

static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		pr_err("Admin command failed[%u]\n", comp_status);

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	case ENA_ADMIN_RESOURCE_BUSY:
		return -EAGAIN;
	}

	return -EINVAL;
}

static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
{
	delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
	delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
	usleep_range(delay_us, 2 * delay_us);
}
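
/*
 * Worked example (illustrative numbers derived from the code above): with the
 * minimum of ENA_MIN_ADMIN_POLL_US (100us), successive calls with
 * exp = 0, 1, 2, ... sleep in windows of roughly [100us, 200us],
 * [200us, 400us], [400us, 800us], and so on, with the lower bound capped at
 * ENA_MAX_ADMIN_POLL_US (5000us). usleep_range() is given a [delay, 2 * delay]
 * window so the scheduler may coalesce nearby wakeups.
 */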

static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	unsigned long timeout;
	int ret;
	u32 exp = 0;

	timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);

	while (1) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (time_is_before_jiffies(timeout)) {
			pr_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			spin_lock_irqsave(&admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			spin_unlock_irqrestore(&admin_queue->q_lock, flags);

			ret = -ETIME;
			goto err;
		}

		ena_delay_exponential_backoff_us(exp++,
						 admin_queue->ena_dev->ena_min_poll_delay_us);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		pr_err("Command was aborted\n");
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		ret = -ENODEV;
		goto err;
	}

	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
	     comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}
/*
 * Set the LLQ configurations of the firmware
 *
 * The driver provides only the enabled feature values to the device,
 * which, in turn, checks if they are supported.
 */
static int ena_com_set_llq(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int ret;

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;

	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;

	cmd.u.llq.accel_mode.u.set.enabled_flags =
		BIT(ENA_ADMIN_DISABLE_META_CACHING) |
		BIT(ENA_ADMIN_LIMIT_TX_BURST);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set LLQ configurations: %d\n", ret);

	return ret;
}

static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
				   struct ena_admin_feature_llq_desc *llq_features,
				   struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	struct ena_admin_accel_mode_get llq_accel_mode_get;
	u16 supported_feat;
	int rc;

	memset(llq_info, 0, sizeof(*llq_info));

	supported_feat = llq_features->header_location_ctrl_supported;

	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
		llq_info->header_location_ctrl =
			llq_default_cfg->llq_header_location;
	} else {
		pr_err("Invalid header location control, supported: 0x%x\n",
		       supported_feat);
		return -EINVAL;
	}

	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
		supported_feat = llq_features->descriptors_stride_ctrl_supported;
		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
		} else {
			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
			} else {
				pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
				       supported_feat);
				return -EINVAL;
			}

			pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			       llq_default_cfg->llq_stride_ctrl, supported_feat,
			       llq_info->desc_stride_ctrl);
		}
	} else {
		llq_info->desc_stride_ctrl = 0;
	}

	supported_feat = llq_features->entry_size_ctrl_supported;
	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
	} else {
		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
			llq_info->desc_list_entry_size = 128;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
			llq_info->desc_list_entry_size = 192;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
			llq_info->desc_list_entry_size = 256;
		} else {
			pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
			       supported_feat);
			return -EINVAL;
		}

		pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
		       llq_default_cfg->llq_ring_entry_size, supported_feat,
		       llq_info->desc_list_entry_size);
	}
	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be a whole multiple of 8.
		 * This requirement comes from __iowrite64_copy()
		 */
		pr_err("Illegal entry size %d\n", llq_info->desc_list_entry_size);
		return -EINVAL;
	}

	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
			sizeof(struct ena_eth_io_tx_desc);
	else
		llq_info->descs_per_entry = 1;

	supported_feat = llq_features->desc_num_before_header_supported;
	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
	} else {
		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
		} else {
			pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
			       supported_feat);
			return -EINVAL;
		}

		pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
		       llq_default_cfg->llq_num_decs_before_header,
		       supported_feat, llq_info->descs_num_before_header);
	}
	/* Check for accelerated queue support */
	llq_accel_mode_get = llq_features->accel_mode.u.get;

	llq_info->disable_meta_caching =
		!!(llq_accel_mode_get.supported_flags &
		   BIT(ENA_ADMIN_DISABLE_META_CACHING));

	if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
		llq_info->max_entries_in_tx_burst =
			llq_accel_mode_get.max_tx_burst_size /
			llq_default_cfg->llq_ring_entry_size_value;

	rc = ena_com_set_llq(ena_dev);
	if (rc)
		pr_err("Cannot set LLQ configuration: %d\n", rc);

	return rc;
}
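
/*
 * For context, a hedged sketch of the llq_default_cfg a caller might pass in.
 * These values mirror the defaults the netdev driver typically requests; the
 * exact choices are the caller's policy, not something this function mandates:
 *
 *	struct ena_llq_configurations llq_config;
 *
 *	llq_config.llq_header_location = ENA_ADMIN_INLINE_HEADER;
 *	llq_config.llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
 *	llq_config.llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
 *	llq_config.llq_ring_entry_size_value = 128;
 *	llq_config.llq_num_decs_before_header =
 *		ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
 *
 * ena_com_config_llq_info() then negotiates each field against the
 * llq_features reported by the device, falling back where needed.
 */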

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	int ret;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(
					    admin_queue->completion_timeout));

	/* In case the command wasn't completed, find out the root cause.
	 * There might be 2 kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't get any MSI-X interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED) {
			pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
			       comp_ctx->cmd_opcode,
			       admin_queue->auto_polling ? "ON" : "OFF");
			/* Check if fallback to polling is enabled */
			if (admin_queue->auto_polling)
				admin_queue->polling = true;
		} else {
			pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
			       comp_ctx->cmd_opcode, comp_ctx->status);
		}
		/* Check if shifted to polling mode.
		 * This will happen if there is a completion without an interrupt
		 * and autopolling mode is enabled. Continuing normal execution in such case
		 */
		if (!admin_queue->polling) {
			admin_queue->running_state = false;
			ret = -ETIME;
			goto err;
		}
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

/* This method reads a hardware device register by posting writes
 * and waiting for the response.
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags = 0;
	u32 timeout = mmio_read->reg_read_to;

	might_sleep();

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return readl(ena_dev->reg_bar + offset);

	spin_lock_irqsave(&mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
			break;

		udelay(1);
	}

	if (unlikely(i == timeout)) {
837*4882a593Smuzhiyun pr_err("Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
838*4882a593Smuzhiyun mmio_read->seq_num, offset, read_resp->req_id,
839*4882a593Smuzhiyun read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		pr_err("Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	spin_unlock_irqrestore(&mmio_read->lock, flags);

	return ret;
}

/* There are two ways to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ handler calls ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}
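
/*
 * Taken together, submit + wait compose into the synchronous admin command
 * helper used throughout this file. A minimal sketch of that flow (the real
 * ena_com_execute_admin_command(), called below, is defined elsewhere in
 * ena_com.c and also handles error accounting):
 *
 *	struct ena_comp_ctx *comp_ctx;
 *	int ret;
 *
 *	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
 *					    comp, comp_size);
 *	if (IS_ERR(comp_ctx))
 *		return PTR_ERR(comp_ctx);
 *
 *	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
 */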

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
				       ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
				      ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("Failed to destroy io sq error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_cq->cdesc_addr.virt_addr,
				  io_cq->cdesc_addr.phys_addr);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_sq->desc_addr.virt_addr,
				  io_sq->desc_addr.phys_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}

	if (io_sq->bounce_buf_ctrl.base_buffer) {
		devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
		io_sq->bounce_buf_ctrl.base_buffer = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, exp = 0;
	unsigned long timeout_stamp;

	/* Convert timeout from resolution of 100ms to us resolution. */
	timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout);

	while (1) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			pr_err("Reg read timeout occurred\n");
			return -ETIME;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
		    exp_state)
			return 0;

		if (time_is_before_jiffies(timeout_stamp))
			return -ETIME;

		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
	}
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes are always supported */
975*4882a593Smuzhiyun if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
976*4882a593Smuzhiyun !(ena_dev->supported_features & feature_mask))
977*4882a593Smuzhiyun return false;
978*4882a593Smuzhiyun
979*4882a593Smuzhiyun return true;
980*4882a593Smuzhiyun }
981*4882a593Smuzhiyun
ena_com_get_feature_ex(struct ena_com_dev * ena_dev,struct ena_admin_get_feat_resp * get_resp,enum ena_admin_aq_feature_id feature_id,dma_addr_t control_buf_dma_addr,u32 control_buff_size,u8 feature_ver)982*4882a593Smuzhiyun static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
983*4882a593Smuzhiyun struct ena_admin_get_feat_resp *get_resp,
984*4882a593Smuzhiyun enum ena_admin_aq_feature_id feature_id,
985*4882a593Smuzhiyun dma_addr_t control_buf_dma_addr,
986*4882a593Smuzhiyun u32 control_buff_size,
987*4882a593Smuzhiyun u8 feature_ver)
988*4882a593Smuzhiyun {
989*4882a593Smuzhiyun struct ena_com_admin_queue *admin_queue;
990*4882a593Smuzhiyun struct ena_admin_get_feat_cmd get_cmd;
991*4882a593Smuzhiyun int ret;
992*4882a593Smuzhiyun
993*4882a593Smuzhiyun if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
994*4882a593Smuzhiyun pr_debug("Feature %d isn't supported\n", feature_id);
995*4882a593Smuzhiyun return -EOPNOTSUPP;
996*4882a593Smuzhiyun }
997*4882a593Smuzhiyun
998*4882a593Smuzhiyun memset(&get_cmd, 0x0, sizeof(get_cmd));
999*4882a593Smuzhiyun admin_queue = &ena_dev->admin_queue;
1000*4882a593Smuzhiyun
1001*4882a593Smuzhiyun get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
1002*4882a593Smuzhiyun
1003*4882a593Smuzhiyun if (control_buff_size)
1004*4882a593Smuzhiyun get_cmd.aq_common_descriptor.flags =
1005*4882a593Smuzhiyun ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
1006*4882a593Smuzhiyun else
1007*4882a593Smuzhiyun get_cmd.aq_common_descriptor.flags = 0;
1008*4882a593Smuzhiyun
1009*4882a593Smuzhiyun ret = ena_com_mem_addr_set(ena_dev,
1010*4882a593Smuzhiyun &get_cmd.control_buffer.address,
1011*4882a593Smuzhiyun control_buf_dma_addr);
1012*4882a593Smuzhiyun if (unlikely(ret)) {
1013*4882a593Smuzhiyun pr_err("Memory address set failed\n");
1014*4882a593Smuzhiyun return ret;
1015*4882a593Smuzhiyun }
1016*4882a593Smuzhiyun
1017*4882a593Smuzhiyun get_cmd.control_buffer.length = control_buff_size;
1018*4882a593Smuzhiyun get_cmd.feat_common.feature_version = feature_ver;
1019*4882a593Smuzhiyun get_cmd.feat_common.feature_id = feature_id;
1020*4882a593Smuzhiyun
1021*4882a593Smuzhiyun ret = ena_com_execute_admin_command(admin_queue,
1022*4882a593Smuzhiyun (struct ena_admin_aq_entry *)
1023*4882a593Smuzhiyun &get_cmd,
1024*4882a593Smuzhiyun sizeof(get_cmd),
1025*4882a593Smuzhiyun (struct ena_admin_acq_entry *)
1026*4882a593Smuzhiyun get_resp,
1027*4882a593Smuzhiyun sizeof(*get_resp));
1028*4882a593Smuzhiyun
1029*4882a593Smuzhiyun if (unlikely(ret))
1030*4882a593Smuzhiyun pr_err("Failed to submit get_feature command %d error: %d\n",
1031*4882a593Smuzhiyun feature_id, ret);
1032*4882a593Smuzhiyun
1033*4882a593Smuzhiyun return ret;
1034*4882a593Smuzhiyun }
1035*4882a593Smuzhiyun
ena_com_get_feature(struct ena_com_dev * ena_dev,struct ena_admin_get_feat_resp * get_resp,enum ena_admin_aq_feature_id feature_id,u8 feature_ver)1036*4882a593Smuzhiyun static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1037*4882a593Smuzhiyun struct ena_admin_get_feat_resp *get_resp,
1038*4882a593Smuzhiyun enum ena_admin_aq_feature_id feature_id,
1039*4882a593Smuzhiyun u8 feature_ver)
1040*4882a593Smuzhiyun {
1041*4882a593Smuzhiyun return ena_com_get_feature_ex(ena_dev,
1042*4882a593Smuzhiyun get_resp,
1043*4882a593Smuzhiyun feature_id,
1044*4882a593Smuzhiyun 0,
1045*4882a593Smuzhiyun 0,
1046*4882a593Smuzhiyun feature_ver);
1047*4882a593Smuzhiyun }
1048*4882a593Smuzhiyun
ena_com_get_current_hash_function(struct ena_com_dev * ena_dev)1049*4882a593Smuzhiyun int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
1050*4882a593Smuzhiyun {
1051*4882a593Smuzhiyun return ena_dev->rss.hash_func;
1052*4882a593Smuzhiyun }
1053*4882a593Smuzhiyun
ena_com_hash_key_fill_default_key(struct ena_com_dev * ena_dev)1054*4882a593Smuzhiyun static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
1055*4882a593Smuzhiyun {
1056*4882a593Smuzhiyun struct ena_admin_feature_rss_flow_hash_control *hash_key =
1057*4882a593Smuzhiyun (ena_dev->rss).hash_key;
1058*4882a593Smuzhiyun
1059*4882a593Smuzhiyun netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
1060*4882a593Smuzhiyun /* The key buffer is stored in the device in an array of
1061*4882a593Smuzhiyun * uint32 elements.
1062*4882a593Smuzhiyun */
1063*4882a593Smuzhiyun hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
1064*4882a593Smuzhiyun }
1065*4882a593Smuzhiyun
ena_com_hash_key_allocate(struct ena_com_dev * ena_dev)1066*4882a593Smuzhiyun static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1067*4882a593Smuzhiyun {
1068*4882a593Smuzhiyun struct ena_rss *rss = &ena_dev->rss;
1069*4882a593Smuzhiyun
1070*4882a593Smuzhiyun if (!ena_com_check_supported_feature_id(ena_dev,
1071*4882a593Smuzhiyun ENA_ADMIN_RSS_HASH_FUNCTION))
1072*4882a593Smuzhiyun return -EOPNOTSUPP;
1073*4882a593Smuzhiyun
1074*4882a593Smuzhiyun rss->hash_key =
1075*4882a593Smuzhiyun dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1076*4882a593Smuzhiyun &rss->hash_key_dma_addr, GFP_KERNEL);
1077*4882a593Smuzhiyun
1078*4882a593Smuzhiyun if (unlikely(!rss->hash_key))
1079*4882a593Smuzhiyun return -ENOMEM;
1080*4882a593Smuzhiyun
1081*4882a593Smuzhiyun return 0;
1082*4882a593Smuzhiyun }
1083*4882a593Smuzhiyun
static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				  rss->hash_key, rss->hash_key_dma_addr);
	rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_ctrl =
		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				   &rss->hash_ctrl_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_ctrl))
		return -ENOMEM;

	return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
	rss->hash_ctrl = NULL;
}

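/* Allocate the RSS indirection table: query the device for the supported
 * table size range, then allocate both the DMA-coherent table handed to the
 * device and a host-side shadow copy that holds queue ids.
 */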
static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		pr_err("Indirect table size doesn't fit. Requested size: %d while min is: %d and max is: %d\n",
		       1 << log_size, 1 << get_resp.u.ind_table.min_size,
		       1 << get_resp.u.ind_table.max_size);
		return -EINVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rss->rss_ind_tbl =
		dma_alloc_coherent(ena_dev->dmadev, tbl_size,
				   &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
			  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return -ENOMEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
				  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}

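/* Build and execute an ENA_ADMIN_CREATE_SQ admin command for the given IO
 * submission queue. On success the device returns the SQ index and doorbell
 * offset; for device-placed (LLQ) queues it also returns the offsets of the
 * header and descriptor areas inside the memory BAR.
 */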
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			pr_err("Memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	pr_debug("Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}

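/* Translate the host indirection table (which holds queue ids) into the
 * device format, which references the CQ index of the corresponding Rx
 * IO queue.
 */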
static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return -EINVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}

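/* Rescale the stored Rx/Tx interrupt moderation intervals when the device
 * reports a new delay resolution, so the effective delay in microseconds is
 * preserved. For example, an interval of 64 units at a previous resolution
 * of 1 usec becomes 64 * 1 / 2 = 32 units at a 2 usec resolution.
 */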
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;

	if (unlikely(!intr_delay_resolution)) {
		pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
	}

	/* update Rx */
	ena_dev->intr_moder_rx_interval =
		ena_dev->intr_moder_rx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval =
		ena_dev->intr_moder_tx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	ena_dev->intr_delay_resolution = intr_delay_resolution;
}

/*****************************************************************************/
/*******************************      API       ******************************/
/*****************************************************************************/

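/* Submit an admin command and block until its completion arrives (or the
 * admin queue is aborted). This is the synchronous entry point used by the
 * feature get/set helpers in this file.
 */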
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		if (comp_ctx == ERR_PTR(-ENODEV))
			pr_debug("Failed to submit command [%ld]\n",
				 PTR_ERR(comp_ctx));
		else
			pr_err("Failed to submit command [%ld]\n",
			       PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			pr_err("Failed to process command. ret = %d\n", ret);
		else
			pr_debug("Failed to process command. ret = %d\n", ret);
	}
	return ret;
}

int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		pr_err("Memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	pr_debug("Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Invalid queue number %d, the maximum is %d\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}

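/* Mark every outstanding admin command as aborted and wake up its waiter;
 * typically used on the teardown/reset path when no further completions
 * are expected from the device.
 */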
void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		complete(&comp_ctx->wait_event);
	}
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;
	u32 exp = 0;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		ena_delay_exponential_backoff_us(exp++,
						 ena_dev->ena_min_poll_delay_us);
		spin_lock_irqsave(&admin_queue->q_lock, flags);
	}
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

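/* Configure which AENQ event groups the device may deliver. The requested
 * groups are first validated against the supported_groups mask reported by
 * the device.
 */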
int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
	if (ret) {
		pr_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
			get_resp.u.aenq.supported_groups, groups_flag);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}

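/* Read the DMA address width from the device capabilities register and
 * cache it; ena_com_mem_addr_set() later rejects addresses that exceed
 * this width.
 */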
int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	pr_debug("ENA dma width: %d\n", width);

	if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
		pr_err("DMA width illegal value: %d\n", width);
		return -EINVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * as new as the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	pr_info("ENA device version: %d.%d\n",
		(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	pr_info("ENA controller version: %d.%d.%d implementation version %d\n",
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}

static void
ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
				      struct ena_com_admin_queue *admin_queue)
{
	if (!admin_queue->comp_ctx)
		return;

	devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);

	admin_queue->comp_ctx = NULL;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);

	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
				  sq->dma_addr);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
				  cq->dma_addr);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (aenq->entries)
		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
				  aenq->dma_addr);
	aenq->entries = NULL;
}

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}

void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
					 bool polling)
{
	ena_dev->admin_queue.auto_polling = polling;
}

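/* Set up the "readless" register read mechanism: allocate a DMA-coherent
 * response buffer and publish its address to the device, so register reads
 * can be served by the device writing the value back over DMA instead of
 * a direct MMIO read.
 */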
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_alloc_coherent(ena_dev->dmadev,
				   sizeof(*mmio_read->read_resp),
				   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (unlikely(!mmio_read->read_resp))
		return -ENOMEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);

	mmio_read->read_resp = NULL;
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}

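/* Initialize the admin queue: allocate the completion contexts, SQ, CQ and
 * AENQ, publish their base addresses and capabilities to the device, and
 * mark the queue as running. Any failure unwinds via ena_com_admin_destroy().
 */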
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		pr_err("Device isn't ready, abort com init\n");
		return -ENODEV;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	atomic_set(&admin_queue->outstanding_cmds, 0);

	spin_lock_init(&admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);

	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->ena_dev = ena_dev;
	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}

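/* Create a full IO queue pair: initialize the host-side SQ/CQ state, then
 * issue the create-CQ and create-SQ admin commands. The CQ is created first
 * because the SQ creation command needs the CQ index.
 */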
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
		       ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			min_t(u32, ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}

void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}

int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
}

int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));

	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_EXT,
					 ENA_FEATURE_MAX_QUEUE_EXT_VER);
		if (rc)
			return rc;

		if (get_resp.u.max_queue_ext.version !=
		    ENA_FEATURE_MAX_QUEUE_EXT_VER)
			return -EINVAL;

		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
		       sizeof(get_resp.u.max_queue_ext));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
	} else {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
		       sizeof(get_resp.u.max_queue));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue.max_header_size;

		if (rc)
			return rc;
	}

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* Driver hints isn't a mandatory admin command, so in case the
	 * command isn't supported set driver hints to 0
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);

	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->hw_hints, 0x0,
		       sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
	if (!rc)
		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
		       sizeof(get_resp.u.llq));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
	else
		return rc;

	return 0;
}

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_com_get_specific_aenq_cb:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/* ena_com_aenq_intr_handler:
 * handles the incoming AENQ events:
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u64 timestamp;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE(aenq_common->flags) &
		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Make sure the phase bit (ownership) is as expected before
		 * reading the rest of the descriptor.
		 */
		dma_rmb();

		timestamp = (u64)aenq_common->timestamp_low |
			    ((u64)aenq_common->timestamp_high << 32);

		pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrome, timestamp);

		/* Handle the specific event */
		handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get the next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update the aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	writel_relaxed((u32)aenq->head,
		       ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

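/* Reset the device: assert the reset bit with the reset reason encoded,
 * wait for the device to report reset-in-progress, deassert, and wait for
 * the indication to clear. The readless MMIO read address is re-published
 * after the reset is triggered, and the admin completion timeout is
 * refreshed from the capabilities register.
 */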
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read32 timeout occurred\n");
		return -ETIME;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		pr_err("Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		pr_err("Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		pr_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		pr_err("Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}

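/* Helper for the statistics getters below: fill an ENA_ADMIN_GET_STATS
 * command of the requested type and execute it synchronously.
 */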
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

ena_com_get_eni_stats(struct ena_com_dev * ena_dev,struct ena_admin_eni_stats * stats)2160*4882a593Smuzhiyun int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
2161*4882a593Smuzhiyun struct ena_admin_eni_stats *stats)
2162*4882a593Smuzhiyun {
2163*4882a593Smuzhiyun struct ena_com_stats_ctx ctx;
2164*4882a593Smuzhiyun int ret;
2165*4882a593Smuzhiyun
2166*4882a593Smuzhiyun memset(&ctx, 0x0, sizeof(ctx));
2167*4882a593Smuzhiyun ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
2168*4882a593Smuzhiyun if (likely(ret == 0))
2169*4882a593Smuzhiyun memcpy(stats, &ctx.get_resp.u.eni_stats,
2170*4882a593Smuzhiyun sizeof(ctx.get_resp.u.eni_stats));
2171*4882a593Smuzhiyun
2172*4882a593Smuzhiyun return ret;
2173*4882a593Smuzhiyun }
2174*4882a593Smuzhiyun
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.u.basic_stats,
		       sizeof(ctx.get_resp.u.basic_stats));

	return ret;
}

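/* Inform the device of the driver's MTU via a SET_FEATURE admin command.
 * Returns -EOPNOTSUPP when the device doesn't advertise ENA_ADMIN_MTU.
 */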
int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}

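/* Query the device's stateless offload capabilities and copy the
 * returned descriptor into @offload.
 */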
int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
	if (unlikely(ret)) {
		pr_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

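/* Program the RSS hash function cached in ena_dev->rss into the device.
 * The DMA-mapped hash key is handed over out-of-band through the
 * indirect control buffer rather than inline in the command.
 */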
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_FUNCTION);
		return -EOPNOTSUPP;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
		pr_err("Func hash %d isn't supported by device, abort\n",
		       rss->hash_func);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		pr_err("Memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		pr_err("Failed to set hash function %d. error: %d\n",
		       rss->hash_func, ret);
		return -EINVAL;
	}

	return 0;
}

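/* Validate and cache an RSS hash function, then program it through
 * ena_com_set_hash_function(). For Toeplitz an optional @key of exactly
 * sizeof(hash_key->key) bytes may be supplied; CRC32 only consumes
 * @init_val. On failure the previously cached function is restored.
 *
 * Illustrative call, with hypothetical values not taken from this file:
 *
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					key, sizeof(key), 0);
 */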
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key;
	struct ena_admin_get_feat_resp get_resp;
	enum ena_admin_hash_functions old_func;
	struct ena_rss *rss = &ena_dev->rss;
	int rc;

	hash_key = rss->hash_key;

	/* Make sure the key size is a multiple of DWORDs */
	if (unlikely(key_len & 0x3))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
		pr_err("Flow hash function %d isn't supported\n", func);
		return -EOPNOTSUPP;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key) {
			if (key_len != sizeof(hash_key->key)) {
				pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
				       key_len, sizeof(hash_key->key));
				return -EINVAL;
			}
			memcpy(hash_key->key, key, key_len);
			rss->hash_init_val = init_val;
			hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
		}
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		pr_err("Invalid hash function (%d)\n", func);
		return -EINVAL;
	}

	old_func = rss->hash_func;
	rss->hash_func = func;
	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function in case of failure */
	if (unlikely(rc))
		rss->hash_func = old_func;

	return rc;
}

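/* Read back the hash function currently selected by the device, cache it
 * in ena_dev->rss and report it through @func (which must be non-NULL).
 */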
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	if (unlikely(!func))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key), 0);
	if (unlikely(rc))
		return rc;

	/* ffs() is one-based (returns 1 when the lsb is set), so convert
	 * back to a zero-based hash function index
	 */
	rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
	if (rss->hash_func)
		rss->hash_func--;

	*func = rss->hash_func;

	return 0;
}

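/* Copy the cached RSS hash key into @key. key_parts counts entries of
 * hash_key->key[], so the copied length is key_parts * sizeof(key[0]).
 */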
int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		ena_dev->rss.hash_key;

	if (key)
		memcpy(key, hash_key->key,
		       (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));

	return 0;
}

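/* Refresh the RSS hash-input control table from the device and, when
 * @fields is non-NULL, report the fields currently selected for @proto.
 */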
int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl), 0);
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

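/* Push the host copy of the RSS hash-input control table to the device,
 * with L3/L4 input sorting enabled.
 */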
int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_INPUT);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		pr_err("Memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

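/* Install a default hash-input configuration: 4-tuple hashing for
 * TCP/UDP over IPv4/IPv6, L3 addresses for plain IP and fragments, and
 * L2 addresses for non-IP traffic. Fails with -EOPNOTSUPP if the device
 * doesn't support all of the selected fields for some protocol.
 */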
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				   hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("Hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
			       i, hash_ctrl->supported_fields[i].fields,
			       hash_ctrl->selected_fields[i].fields);
			return -EOPNOTSUPP;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

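/* Select the RSS hash input fields for a single protocol. Unsupported
 * field requests are logged; if programming the table fails, the
 * previous control table is restored by re-reading it from the device.
 */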
int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		pr_err("Invalid proto num (%u)\n", proto);
		return -EINVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		pr_err("Proto %d doesn't support the required fields %x. supports only: %x\n",
		       proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

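/* Validate and store a single host-side indirection table entry. The
 * table is only pushed to the device by ena_com_indirect_table_set().
 */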
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return -EINVAL;

	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
		return -EINVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

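/* Convert the host indirection table to the device representation and
 * program it via SET_FEATURE; the table itself travels through the
 * DMA-mapped control buffer.
 */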
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(
		    ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
		return -EOPNOTSUPP;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		pr_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		pr_err("Memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
				    sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}

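/* Re-read the indirection table feature from the device and, when
 * @ind_tbl is non-NULL, copy the host-side table into it.
 */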
int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		   sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size, 0);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

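/* Allocate and initialize all RSS state: an indirection table of
 * 2^@indr_tbl_log_size entries, the hash key (optional, see below) and
 * the hash-input control table. On failure, previously allocated pieces
 * are torn down in reverse order.
 */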
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	/* The following function might return -EOPNOTSUPP if the device
	 * doesn't support setting the key / hash function. We can safely
	 * ignore this error and fall back to indirection table support only.
	 */
	rc = ena_com_hash_key_allocate(ena_dev);
	if (likely(!rc))
		ena_com_hash_key_fill_default_key(ena_dev);
	else if (rc != -EOPNOTSUPP)
		goto err_hash_key;

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
	return rc;
}

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

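/* Allocate the 4 KB DMA-coherent host-info page shared with the device
 * and stamp it with the ENA common spec version the driver implements.
 */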
int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->host_info =
		dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
				   &host_attr->host_info_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->host_info))
		return -ENOMEM;

	host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
		(ENA_COMMON_SPEC_VERSION_MINOR));

	return 0;
}

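/* Allocate a DMA-coherent debug area of @debug_area_size bytes for the
 * device to write diagnostic data into.
 */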
int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->debug_area_virt_addr =
		dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
				   &host_attr->debug_area_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return -ENOMEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
				  host_attr->host_info_dma_addr);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
				  host_attr->debug_area_virt_addr,
				  host_attr->debug_area_dma_addr);
		host_attr->debug_area_virt_addr = NULL;
	}
}

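/* Hand the host-info page and debug-area addresses to the device. Unlike
 * the other setters, the command is sent unconditionally; see the comment
 * inside on why the feature bit can't be checked here.
 */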
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		pr_err("Memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		pr_err("Memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set host attributes: %d\n", ret);

	return ret;
}

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

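/* Convert a requested coalescing time in usecs to device units of
 * @intr_delay_resolution usecs each. As an illustration (values are
 * hypothetical): with a 4 usec resolution, 64 usecs becomes interval 16.
 */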
static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
							  u32 intr_delay_resolution,
							  u32 *intr_moder_interval)
{
	if (!intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	*intr_moder_interval = coalesce_usecs / intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_tx_interval);
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
							      ena_dev->intr_delay_resolution,
							      &ena_dev->intr_moder_rx_interval);
}

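/* Query interrupt moderation support. A device without the feature is
 * not an error: adaptive moderation is simply left disabled. Otherwise
 * the reported delay resolution feeds the usec conversions above.
 */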
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION, 0);

	if (rc) {
		if (rc == -EOPNOTSUPP) {
			pr_debug("Feature %d isn't supported\n",
				 ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
			       rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	/* Moderation is supported; record the device's delay resolution */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);

	/* Disable adaptive moderation by default - can be enabled later */
	ena_com_disable_adaptive_moderation(ena_dev);

	return 0;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_rx_interval;
}

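/* Choose the TX placement policy. Without device LLQs, fall back to host
 * memory; otherwise configure the LLQ and derive the maximum pushed
 * header size from the chosen descriptor list entry layout.
 */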
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
			    struct ena_admin_feature_llq_desc *llq_features,
			    struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int rc;

	if (!llq_features->max_llq_num) {
		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
		return 0;
	}

	rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
	if (rc)
		return rc;

	ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));

	if (unlikely(ena_dev->tx_max_header_size == 0)) {
		pr_err("The size of the LLQ entry is smaller than needed\n");
		return -EINVAL;
	}

	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;

	return 0;
}