/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
/*! \file octeon_iq.h
 *  \brief Host Driver: Implementation of Octeon input queues. "Input" is
 *  with respect to the Octeon device on the NIC. From this driver's
 *  point of view they are egress queues.
 */

#ifndef __OCTEON_IQ_H__
#define __OCTEON_IQ_H__

#define IQ_STATUS_RUNNING	1

#define IQ_SEND_OK		0
#define IQ_SEND_STOP		1
#define IQ_SEND_FAILED		-1

/*-------------------------  INSTRUCTION QUEUE --------------------------*/

/* \cond */

#define REQTYPE_NONE			0
#define REQTYPE_NORESP_NET		1
#define REQTYPE_NORESP_NET_SG		2
#define REQTYPE_RESP_NET		3
#define REQTYPE_RESP_NET_SG		4
#define REQTYPE_SOFT_COMMAND		5
#define REQTYPE_LAST			5

struct octeon_request_list {
	u32 reqtype;
	void *buf;
};

/* \endcond */

/** Input Queue statistics. Each input queue maintains the following
 *  statistics fields.
 */
struct oct_iq_stats {
	u64 instr_posted; /**< Instructions posted to this queue. */
	u64 instr_processed; /**< Instructions processed in this queue. */
	u64 instr_dropped; /**< Instructions that could not be processed. */
	u64 bytes_sent; /**< Bytes sent through this queue. */
	u64 sgentry_sent; /**< Gather entries sent through this queue. */
	u64 tx_done; /**< Number of packets sent to network. */
	u64 tx_iq_busy; /**< Number of times this iq was found to be full. */
	u64 tx_dropped; /**< Number of pkts dropped due to xmit path errors. */
	u64 tx_tot_bytes; /**< Total count of bytes sent to network. */
	u64 tx_gso;	/* count of tso */
	u64 tx_vxlan;	/* tunnel */
	u64 tx_dmamap_fail; /* Number of times dma mapping failed */
	u64 tx_restart; /* Number of times this queue restarted */
};

#define OCT_IQ_STATS_SIZE	(sizeof(struct oct_iq_stats))
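/* Illustrative only (not part of this header's API): a rough sketch of how a
 * transmit path could account a successfully posted packet against the
 * counters above.  The skb and the surrounding xmit function are assumptions
 * made for the example; the INCR_INSTRQUEUE_PKT_COUNT() helper defined later
 * in this file exists for exactly this kind of update.
 *
 *	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
 *
 *	iq->stats.instr_posted++;
 *	iq->stats.bytes_sent += skb->len;
 *	iq->stats.tx_tot_bytes += skb->len;
 *	iq->stats.tx_done++;
 */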
/** The instruction (input) queue.
 *  The input queue is used to post raw (instruction) mode data or packet
 *  data to the Octeon device from the host. Each input queue (up to 4) for
 *  an Octeon device has one such structure to represent it.
 */
struct octeon_instr_queue {
	struct octeon_device *oct_dev;

	/** A spinlock to protect access to the input ring. */
	spinlock_t lock;

	/** A spinlock to protect while posting on the ring. */
	spinlock_t post_lock;

	/** This flag indicates if the queue can be used for soft commands.
	 *  If this flag is set, post_lock must be acquired before posting
	 *  a command to the queue.
	 *  If this flag is clear, post_lock is invalid for the queue.
	 *  All control commands (soft commands) will go through only Queue 0
	 *  (control and data queue). So only queue-0 needs post_lock;
	 *  other queues are only data queues and do not need post_lock.
	 */
	bool allow_soft_cmds;

	u32 pkt_in_done;

	u32 pkts_processed;

	/** A spinlock to protect access to the input ring. */
	spinlock_t iq_flush_running_lock;

	/** Flag that indicates if the queue uses 64 byte commands. */
	u32 iqcmd_64B:1;

	/** Queue info. */
	union oct_txpciq txpciq;

	u32 rsvd:17;

	/* Controls whether extra flushing of IQ is done on Tx */
	u32 do_auto_flush:1;

	u32 status:8;

	/** Maximum no. of instructions in this queue. */
	u32 max_count;

	/** Index in input ring where the driver should write the next packet */
	u32 host_write_index;

	/** Index in input ring where Octeon is expected to read the next
	 *  packet.
	 */
	u32 octeon_read_index;

	/** This index aids in finding the window in the queue where Octeon
	 *  has read the commands.
	 */
	u32 flush_index;

	/** This field keeps track of the instructions pending in this queue. */
	atomic_t instr_pending;

	u32 reset_instr_cnt;

	/** Pointer to the Virtual Base addr of the input ring. */
	u8 *base_addr;

	struct octeon_request_list *request_list;

	/** Octeon doorbell register for the ring. */
	void __iomem *doorbell_reg;

	/** Octeon instruction count register for this ring. */
	void __iomem *inst_cnt_reg;

	/** Number of instructions pending to be posted to Octeon. */
	u32 fill_cnt;

	/** The max. number of instructions that can be held pending by the
	 *  driver.
	 */
	u32 fill_threshold;

	/** The last time that the doorbell was rung. */
	u64 last_db_time;

	/** The doorbell timeout. If the doorbell was not rung for this time
	 *  and fill_cnt is non-zero, ring the doorbell again.
	 */
	u32 db_timeout;

	/** Statistics for this input queue. */
	struct oct_iq_stats stats;

	/** DMA mapped base address of the input descriptor ring. */
	dma_addr_t base_addr_dma;

	/** Application context */
	void *app_ctx;

	/* network stack queue index */
	int q_index;

	/* os ifidx associated with this queue */
	int ifidx;

};
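/* Illustrative only: a rough sketch of how the ring fields above interact
 * when one command is posted.  Names other than the structure members
 * (cmd, cmdsize) and the exact locking and timeout details are assumptions
 * made for the example; the real post path lives behind
 * octeon_send_command() and octeon_flush_iq(), declared later in this file.
 *
 *	spin_lock_bh(&iq->lock);
 *
 *	// Copy the command into the slot the host owns next and advance the
 *	// write index with wrap-around on max_count.
 *	memcpy(iq->base_addr + (cmdsize * iq->host_write_index), cmd, cmdsize);
 *	iq->host_write_index = (iq->host_write_index + 1) % iq->max_count;
 *
 *	atomic_inc(&iq->instr_pending);
 *	iq->fill_cnt++;
 *
 *	// Ring the doorbell once enough commands are batched, or when the
 *	// doorbell has been quiet for longer than db_timeout.
 *	if (iq->fill_cnt >= iq->fill_threshold ||
 *	    time_after(jiffies, (unsigned long)iq->last_db_time + iq->db_timeout)) {
 *		writel(iq->fill_cnt, iq->doorbell_reg);
 *		iq->last_db_time = jiffies;
 *		iq->fill_cnt = 0;
 *	}
 *
 *	spin_unlock_bh(&iq->lock);
 */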
/*----------------------  INSTRUCTION FORMAT ----------------------------*/

/** 32-byte instruction format.
 *  Format of instruction for a 32-byte mode input queue.
 */
struct octeon_instr_32B {
	/** Pointer where the input data is available. */
	u64 dptr;

	/** Instruction Header. */
	u64 ih;

	/** Pointer where the response for a RAW mode packet will be written
	 *  by Octeon.
	 */
	u64 rptr;

	/** Input Request Header. Additional info about the input. */
	u64 irh;

};

#define OCT_32B_INSTR_SIZE	(sizeof(struct octeon_instr_32B))

/** 64-byte instruction format.
 *  Format of instruction for a 64-byte mode input queue.
 */
struct octeon_instr2_64B {
	/** Pointer where the input data is available. */
	u64 dptr;

	/** Instruction Header. */
	u64 ih2;

	/** Input Request Header. */
	u64 irh;

	/** opcode/subcode specific parameters */
	u64 ossp[2];

	/** Return Data Parameters */
	u64 rdp;

	/** Pointer where the response for a RAW mode packet will be written
	 *  by Octeon.
	 */
	u64 rptr;

	u64 reserved;
};

struct octeon_instr3_64B {
	/** Pointer where the input data is available. */
	u64 dptr;

	/** Instruction Header. */
	u64 ih3;

	/** PKI Instruction Header. */
	u64 pki_ih3;

	/** Input Request Header. */
	u64 irh;

	/** opcode/subcode specific parameters */
	u64 ossp[2];

	/** Return Data Parameters */
	u64 rdp;

	/** Pointer where the response for a RAW mode packet will be written
	 *  by Octeon.
	 */
	u64 rptr;

};

union octeon_instr_64B {
	struct octeon_instr2_64B cmd2;
	struct octeon_instr3_64B cmd3;
};

#define OCT_64B_INSTR_SIZE	(sizeof(union octeon_instr_64B))
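/* Illustrative only: the layouts above are meant to be exactly 32 and 64
 * bytes wide (four and eight u64 words, respectively), since the input ring
 * holds fixed-size command slots.  A compile-time check such as the
 * following sketch could make that explicit; it is not part of the driver.
 *
 *	static_assert(sizeof(struct octeon_instr_32B) == 32,
 *		      "32-byte IQ command must be 32 bytes");
 *	static_assert(sizeof(union octeon_instr_64B) == 64,
 *		      "64-byte IQ command must be 64 bytes");
 */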
/** The size of each buffer in soft command buffer pool
 */
#define SOFT_COMMAND_BUFFER_SIZE	2048

struct octeon_soft_command {
	/** Soft command buffer info. */
	struct list_head node;
	u64 dma_addr;
	u32 size;

	/** Command and return status */
	union octeon_instr_64B cmd;

#define COMPLETION_WORD_INIT	0xffffffffffffffffULL
	u64 *status_word;

	/** Data buffer info */
	void *virtdptr;
	u64 dmadptr;
	u32 datasize;

	/** Return buffer info */
	void *virtrptr;
	u64 dmarptr;
	u32 rdatasize;

	/** Context buffer info */
	void *ctxptr;
	u32 ctxsize;

	/** Time out and callback */
	size_t expiry_time;
	u32 iq_no;
	void (*callback)(struct octeon_device *, u32, void *);
	void *callback_arg;

	int caller_is_done;
	u32 sc_status;
	struct completion complete;
};
/* max timeout (in milliseconds) for a soft request */
#define LIO_SC_MAX_TMO_MS	60000

/** Maximum number of buffers to allocate into soft command buffer pool
 */
#define MAX_SOFT_COMMAND_BUFFERS	256

/** Head of a soft command buffer pool.
 */
struct octeon_sc_buffer_pool {
	/** List structure to add/delete pending entries to */
	struct list_head head;

	/** A lock for this response list */
	spinlock_t lock;

	atomic_t alloc_buf_count;
};

#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)	\
		(((octeon_dev_ptr)->instr_queue[iq_no]->stats.field) += count)

int octeon_setup_sc_buffer_pool(struct octeon_device *oct);
int octeon_free_sc_done_list(struct octeon_device *oct);
int octeon_free_sc_zombie_list(struct octeon_device *oct);
int octeon_free_sc_buffer_pool(struct octeon_device *oct);
struct octeon_soft_command *
	octeon_alloc_soft_command(struct octeon_device *oct,
				  u32 datasize, u32 rdatasize,
				  u32 ctxsize);
void octeon_free_soft_command(struct octeon_device *oct,
			      struct octeon_soft_command *sc);
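/* Illustrative only: a rough sketch of the soft-command lifecycle built from
 * the helpers in this file (octeon_prepare_soft_command() and
 * octeon_send_soft_command() are declared further below).  The opcode and
 * subcode values, the buffer sizes and the error handling are assumptions
 * made for the example, not the driver's actual control path.
 *
 *	struct octeon_soft_command *sc;
 *	int ret;
 *
 *	sc = octeon_alloc_soft_command(oct, data_len, resp_len, 0);
 *	if (!sc)
 *		return -ENOMEM;
 *
 *	init_completion(&sc->complete);
 *
 *	octeon_prepare_soft_command(oct, sc, opcode, subcode, 0, 0, 0);
 *	ret = octeon_send_soft_command(oct, sc);
 *	if (ret == IQ_SEND_FAILED) {
 *		octeon_free_soft_command(oct, sc);
 *		return -EIO;
 *	}
 *
 *	// The firmware is expected to overwrite *sc->status_word (initialized
 *	// to COMPLETION_WORD_INIT); the response handling then completes
 *	// sc->complete and records the outcome in sc->sc_status.
 *	if (wait_for_completion_timeout(&sc->complete,
 *					msecs_to_jiffies(LIO_SC_MAX_TMO_MS)))
 *		ret = sc->sc_status;
 *
 *	octeon_free_soft_command(oct, sc);
 */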
/**
 *  octeon_init_instr_queue()
 *  @param octeon_dev - pointer to the octeon device structure.
 *  @param txpciq     - queue to be initialized (0 <= q_no <= 3).
 *  @param num_descs  - number of descriptors to allocate for the queue.
 *
 *  Called at driver init time for each input queue. iq_conf has the
 *  configuration parameters for the queue.
 *
 *  @return Success: 0   Failure: 1
 */
int octeon_init_instr_queue(struct octeon_device *octeon_dev,
			    union oct_txpciq txpciq,
			    u32 num_descs);

/**
 *  octeon_delete_instr_queue()
 *  @param octeon_dev - pointer to the octeon device structure.
 *  @param iq_no      - queue to be deleted (0 <= q_no <= 3).
 *
 *  Called at driver unload time for each input queue. Deletes all
 *  allocated resources for the input queue.
 *
 *  @return Success: 0   Failure: 1
 */
int octeon_delete_instr_queue(struct octeon_device *octeon_dev, u32 iq_no);

int lio_wait_for_instr_fetch(struct octeon_device *oct);

void
octeon_ring_doorbell_locked(struct octeon_device *oct, u32 iq_no);

int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
				void (*fn)(void *));

int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct octeon_instr_queue *iq, u32 napi_budget);

int octeon_send_command(struct octeon_device *oct, u32 iq_no,
			u32 force_db, void *cmd, void *buf,
			u32 datasize, u32 reqtype);

void octeon_dump_soft_command(struct octeon_device *oct,
			      struct octeon_soft_command *sc);

void octeon_prepare_soft_command(struct octeon_device *oct,
				 struct octeon_soft_command *sc,
				 u8 opcode, u8 subcode,
				 u32 irh_ossp, u64 ossp0,
				 u64 ossp1);

int octeon_send_soft_command(struct octeon_device *oct,
			     struct octeon_soft_command *sc);

int octeon_setup_iq(struct octeon_device *oct, int ifidx,
		    int q_index, union oct_txpciq iq_no, u32 num_descs,
		    void *app_ctx);
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 napi_budget);
#endif /* __OCTEON_IQ_H__ */