/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * U-Boot version:
 * Copyright (C) 2016-2017 Stefan Roese <sr@denx.de>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <common.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>
#include <asm-generic/gpio.h>
#include <fdt_support.h>

DECLARE_GLOBAL_DATA_PTR;

/*
 * Some linux -> U-Boot compatibility stuff.
 * The netdev_*() log helpers all degrade to plain printf() here; the
 * 'dev' argument is ignored in U-Boot.
 */
#define netdev_err(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_warn(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_info(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_dbg(dev, fmt, args...)		\
	printf(fmt, ##args)

#define ETH_ALEN	6		/* Octets in one ethernet addr */

/*
 * Compile-time type check borrowed from the kernel per-cpu API; in
 * U-Boot there is only one CPU, so per_cpu_ptr() collapses to the
 * pointer itself and the cpu argument is discarded.
 */
#define __verify_pcpu_ptr(ptr)						\
do {									\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)

#define VERIFY_PERCPU_PTR(__p)						\
({									\
	__verify_pcpu_ptr(__p);						\
	(typeof(*(__p)) __kernel __force *)(__p);			\
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define smp_processor_id()	0
#define num_present_cpus()	1
#define for_each_present_cpu(cpu)			\
	for ((cpu) = 0; (cpu) < 1; (cpu)++)

#define NET_SKB_PAD	max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define CONFIG_NR_CPUS		1
#define ETH_HLEN		ETHER_HDR_SIZE	/* Total octets in header */

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP			(2 + ETH_HLEN + 4 + 32)
#define MTU			1500
#define RX_BUFFER_SIZE		(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))

/* Poll count (iterations) used when waiting for an SMI transaction */
#define MVPP2_SMI_TIMEOUT	10000

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define     MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define     MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define     MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define     MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define     MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define     MVPP2_RXQ_POOL_SHORT_OFFS		20
#define     MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define     MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define     MVPP2_RXQ_POOL_LONG_OFFS		24
#define     MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define     MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define     MVPP2_RXQ_PACKET_OFFSET_OFFS	28
#define     MVPP2_RXQ_PACKET_OFFSET_MASK	0x70000000
#define     MVPP2_RXQ_DISABLE_MASK		BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define     MVPP2_PRS_PORT_LU_MAX		0xf
#define     MVPP2_PRS_PORT_LU_MASK(port)	(0xff << ((port) * 4))
#define     MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define     MVPP2_PRS_INIT_OFF_MASK(port)	(0x3f << (((port) % 4) * 8))
#define     MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define     MVPP2_PRS_MAX_LOOP_MASK(port)	(0xff << (((port) % 4) * 8))
#define     MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define     MVPP2_PRS_TCAM_INV_MASK		BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define     MVPP2_PRS_TCAM_EN_MASK		BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define     MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define     MVPP2_CLS_PORT_WAY_MASK(port)	(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define     MVPP2_CLS_LKP_INDEX_WAY_OFFS	6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define     MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define     MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS	3
#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK	0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define     MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define     MVPP22_DESC_ADDR_OFFS		8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define     MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define     MVPP2_RXQ_NUM_PROCESSED_OFFSET	0
#define     MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define     MVPP2_RXQ_OCCUPIED_MASK		0x3fff
#define     MVPP2_RXQ_NON_OCCUPIED_OFFSET	16
#define     MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define     MVPP2_OCCUPIED_THRESH_OFFSET	0
#define     MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define     MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_THRESH_REG			0x2094
#define     MVPP2_TRANSMITTED_THRESH_OFFSET	16
#define     MVPP2_TRANSMITTED_THRESH_MASK	0x3fff0000
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define     MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define     MVPP2_PREF_BUF_SIZE_4		(BIT(12) | BIT(13))
#define     MVPP2_PREF_BUF_SIZE_16		(BIT(12) | BIT(14))
#define     MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define     MVPP2_TXQ_DRAIN_EN_MASK		BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define     MVPP2_TXQ_PENDING_MASK		0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define     MVPP2_TRANSMITTED_COUNT_OFFSET	16
#define     MVPP2_TRANSMITTED_COUNT_MASK	0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define     MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define     MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define     MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define     MVPP22_AGGR_TXQ_DESC_ADDR_OFFS	8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define     MVPP2_AGGR_TXQ_DESC_SIZE_MASK	0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define     MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12

#define MVPP22_AXI_CODE_CACHE_OFFS		0
#define MVPP22_AXI_CODE_DOMAIN_OFFS		4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP21_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG          0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK   0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7

/*
 * NOTE(review): the two definitions below duplicate the two above with
 * identical values; the redefinition is harmless in C but one copy
 * could be dropped in a cleanup.
 */
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK   0x380

#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG     0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK    0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK      0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET    8

#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define     MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define     MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define     MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define     MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define     MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define     MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define     MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define     MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define     MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define     MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define     MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define     MVPP2_BM_POOL_BASE_ADDR_MASK	0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define     MVPP2_BM_POOL_SIZE_MASK		0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define     MVPP2_BM_POOL_GET_READ_PTR_MASK	0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define     MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define     MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define     MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define     MVPP2_BM_START_MASK			BIT(0)
#define     MVPP2_BM_STOP_MASK			BIT(1)
#define     MVPP2_BM_STATE_MASK			BIT(4)
#define     MVPP2_BM_LOW_THRESH_OFFS		8
#define     MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define     MVPP2_BM_LOW_THRESH_VALUE(val)	((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define     MVPP2_BM_HIGH_THRESH_OFFS		16
#define     MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define     MVPP2_BM_HIGH_THRESH_VALUE(val)	((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define     MVPP2_BM_RELEASED_DELAY_MASK	BIT(0)
#define     MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define     MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define     MVPP2_BM_BPPE_FULL_MASK		BIT(3)
#define     MVPP2_BM_AVAILABLE_BP_LOW_MASK	BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define     MVPP2_BM_PHY_ALLOC_GRNTD_MASK	BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP2_BM_ADDR_HIGH_ALLOC		0x6444
#define     MVPP2_BM_ADDR_HIGH_PHYS_MASK	0xff
#define     MVPP2_BM_ADDR_HIGH_VIRT_MASK	0xff00
#define     MVPP2_BM_ADDR_HIGH_VIRT_SHIFT	8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define     MVPP2_BM_PHY_RLS_MC_BUFF_MASK	BIT(0)
#define     MVPP2_BM_PHY_RLS_PRIO_EN_MASK	BIT(1)
#define     MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP21_BM_MC_RLS_REG			0x64c4
#define     MVPP2_BM_MC_ID_MASK			0xfff
#define     MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define     MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
#define MVPP22_BM_MC_RLS_REG			0x64d4

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define     MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define     MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define     MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define     MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define     MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define     MVPP2_TXP_REFILL_PERIOD_MASK(v)	((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define     MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define     MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define     MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define     MVPP2_TXQ_REFILL_PERIOD_MASK(v)	((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define     MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define     MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define     MVPP2_TX_PORT_FLUSH_MASK(port)	(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define     MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define     MVPP2_EXT_GLOBAL_CTRL_DEFAULT	0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define     MVPP2_GMAC_PORT_EN_MASK		BIT(0)
#define     MVPP2_GMAC_PORT_TYPE_MASK		BIT(1)
#define     MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define     MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define     MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define     MVPP2_GMAC_PERIODIC_XON_EN_MASK	BIT(1)
#define     MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define     MVPP2_GMAC_PCS_LB_EN_BIT		6
#define     MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define     MVPP2_GMAC_SA_LOW_OFFS		7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define     MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define     MVPP2_GMAC_SGMII_MODE_MASK		BIT(0)
#define     MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define     MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define     MVPP2_GMAC_PORT_DIS_PADING_MASK	BIT(5)
#define     MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define     MVPP2_GMAC_CLK_125_BYPS_EN_MASK	BIT(9)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define     MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define     MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define     MVPP2_GMAC_EN_PCS_AN		BIT(2)
#define     MVPP2_GMAC_AN_BYPASS_EN		BIT(3)
#define     MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define     MVPP2_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define     MVPP2_GMAC_AN_SPEED_EN		BIT(7)
#define     MVPP2_GMAC_FC_ADV_EN		BIT(9)
#define     MVPP2_GMAC_EN_FC_AN			BIT(11)
#define     MVPP2_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define     MVPP2_GMAC_AN_DUPLEX_EN		BIT(13)
#define     MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG	BIT(15)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define     MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS	6
#define     MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define     MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP2_GMAC_CTRL_4_REG			0x90
#define     MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK	BIT(0)
#define     MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK	BIT(5)
#define     MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK	BIT(6)
#define     MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK	BIT(7)

/*
 * Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */

/* Port Mac Control0 */
#define MVPP22_XLG_CTRL0_REG			0x100
#define     MVPP22_XLG_PORT_EN			BIT(0)
#define     MVPP22_XLG_MAC_RESETN		BIT(1)
#define     MVPP22_XLG_RX_FC_EN			BIT(7)
#define     MVPP22_XLG_MIBCNT_DIS		BIT(13)
/* Port Mac Control1 */
#define MVPP22_XLG_CTRL1_REG			0x104
#define     MVPP22_XLG_MAX_RX_SIZE_OFFS		0
#define     MVPP22_XLG_MAX_RX_SIZE_MASK		0x1fff
/* Port Interrupt Mask */
#define MVPP22_XLG_INTERRUPT_MASK_REG		0x118
#define     MVPP22_XLG_INTERRUPT_LINK_CHANGE	BIT(1)
/* Port Mac Control3 */
#define MVPP22_XLG_CTRL3_REG			0x11c
#define     MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define     MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define     MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC	(1 << 13)
/* Port Mac Control4 */
#define MVPP22_XLG_CTRL4_REG			0x184
#define     MVPP22_XLG_FORWARD_802_3X_FC_EN	BIT(5)
#define     MVPP22_XLG_FORWARD_PFC_EN		BIT(6)
#define     MVPP22_XLG_MODE_DMA_1G		BIT(12)
#define     MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK	BIT(14)

/* XPCS registers */

/* Global Configuration 0 */
#define MVPP22_XPCS_GLOBAL_CFG_0_REG		0x0
#define     MVPP22_XPCS_PCSRESET		BIT(0)
#define     MVPP22_XPCS_PCSMODE_OFFS		3
#define     MVPP22_XPCS_PCSMODE_MASK		(0x3 << \
						 MVPP22_XPCS_PCSMODE_OFFS)
#define     MVPP22_XPCS_LANEACTIVE_OFFS		5
#define     MVPP22_XPCS_LANEACTIVE_MASK		(0x3 << \
						 MVPP22_XPCS_LANEACTIVE_OFFS)

/* MPCS registers */

#define PCS40G_COMMON_CONTROL			0x14
#define     FORWARD_ERROR_CORRECTION_MASK	BIT(10)

#define PCS_CLOCK_RESET				0x14c
#define     TX_SD_CLK_RESET_MASK		BIT(0)
#define     RX_SD_CLK_RESET_MASK		BIT(1)
#define     MAC_CLK_RESET_MASK			BIT(2)
#define     CLK_DIVISION_RATIO_OFFS		4
#define     CLK_DIVISION_RATIO_MASK		(0x7 << CLK_DIVISION_RATIO_OFFS)
#define     CLK_DIV_PHASE_SET_MASK		BIT(11)

/* System Soft Reset 1 */
#define GOP_SOFT_RESET_1_REG			0x108
#define     NETC_GOP_SOFT_RESET_OFFS		6
#define     NETC_GOP_SOFT_RESET_MASK		(0x1 << \
						 NETC_GOP_SOFT_RESET_OFFS)

/* Ports Control 0 */
#define NETCOMP_PORTS_CONTROL_0_REG		0x110
#define     NETC_BUS_WIDTH_SELECT_OFFS		1
#define     NETC_BUS_WIDTH_SELECT_MASK		(0x1 << \
						 NETC_BUS_WIDTH_SELECT_OFFS)
#define     NETC_GIG_RX_DATA_SAMPLE_OFFS	29
#define     NETC_GIG_RX_DATA_SAMPLE_MASK	(0x1 << \
						 NETC_GIG_RX_DATA_SAMPLE_OFFS)
#define     NETC_CLK_DIV_PHASE_OFFS		31
#define     NETC_CLK_DIV_PHASE_MASK		(0x1 << NETC_CLK_DIV_PHASE_OFFS)
/* Ports Control 1 */
#define NETCOMP_PORTS_CONTROL_1_REG		0x114
#define     NETC_PORTS_ACTIVE_OFFSET(p)		(0 + p)
#define     NETC_PORTS_ACTIVE_MASK(p)		(0x1 << \
						 NETC_PORTS_ACTIVE_OFFSET(p))
#define     NETC_PORT_GIG_RF_RESET_OFFS(p)	(28 + p)
#define     NETC_PORT_GIG_RF_RESET_MASK(p)	(0x1 << \
						 NETC_PORT_GIG_RF_RESET_OFFS(p))
#define NETCOMP_CONTROL_0_REG			0x120
#define     NETC_GBE_PORT0_SGMII_MODE_OFFS	0
#define     NETC_GBE_PORT0_SGMII_MODE_MASK	(0x1 << \
						 NETC_GBE_PORT0_SGMII_MODE_OFFS)
#define     NETC_GBE_PORT1_SGMII_MODE_OFFS	1
#define     NETC_GBE_PORT1_SGMII_MODE_MASK	(0x1 << \
						 NETC_GBE_PORT1_SGMII_MODE_OFFS)
#define     NETC_GBE_PORT1_MII_MODE_OFFS	2
#define     NETC_GBE_PORT1_MII_MODE_MASK	(0x1 << \
						 NETC_GBE_PORT1_MII_MODE_OFFS)

#define MVPP22_SMI_MISC_CFG_REG			(MVPP22_SMI + 0x04)
#define     MVPP22_SMI_POLLING_EN		BIT(10)

#define MVPP22_SMI_PHY_ADDR_REG(port)		(MVPP22_SMI + 0x04 + \
						 (0x4 * (port)))

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* SMI: 0xc0054 -> offset 0x54 to lms_base */
#define MVPP21_SMI				0x0054
/* PP2.2: SMI: 0x12a200 -> offset 0x1200 to iface_base */
#define MVPP22_SMI				0x1200
#define MVPP2_PHY_REG_MASK			0x1f
/* SMI register fields */
#define MVPP2_SMI_DATA_OFFS			0	/* Data */
#define MVPP2_SMI_DATA_MASK			(0xffff << MVPP2_SMI_DATA_OFFS)
#define MVPP2_SMI_DEV_ADDR_OFFS			16	/* PHY device address */
#define MVPP2_SMI_REG_ADDR_OFFS			21	/* PHY device reg addr*/
#define MVPP2_SMI_OPCODE_OFFS			26	/* Write/Read opcode */
#define MVPP2_SMI_OPCODE_READ			(1 << MVPP2_SMI_OPCODE_OFFS)
#define MVPP2_SMI_READ_VALID			(1 << 27)	/* Read Valid */
#define MVPP2_SMI_BUSY				(1 << 28)	/* Busy */

#define MVPP2_PHY_ADDR_MASK			0x1f
/* NOTE(review): duplicate of the identical definition above; harmless */
#define MVPP2_PHY_REG_MASK			0x1f

/* Additional PPv2.2 offsets */
#define MVPP22_MPCS				0x007000
#define MVPP22_XPCS				0x007400
#define MVPP22_PORT_BASE			0x007e00
#define MVPP22_PORT_OFFSET			0x001000
#define MVPP22_RFU1				0x318000

/* Maximum number of ports */
#define MVPP22_GOP_MAC_NUM			4

/* GMAC TX FIFO minimum-threshold values for the supported PHY modes */
#define MVPP2_RGMII_TX_FIFO_MIN_TH		0x41
#define MVPP2_SGMII_TX_FIFO_MIN_TH		0x5
#define MVPP2_SGMII2_5_TX_FIFO_MIN_TH		0xb

/* Net Complex */
enum mv_netc_topology {
	MV_NETC_GE_MAC2_SGMII = BIT(0),
	MV_NETC_GE_MAC3_SGMII = BIT(1),
	MV_NETC_GE_MAC3_RGMII = BIT(2),
};

enum mv_netc_phase {
	MV_NETC_FIRST_PHASE,
	MV_NETC_SECOND_PHASE,
};

enum mv_netc_sgmii_xmi_mode {
	MV_NETC_GBE_SGMII,
	MV_NETC_GBE_XMII,
};

enum mv_netc_mii_mode {
	MV_NETC_GBE_RGMII,
	MV_NETC_GBE_MII,
};

enum mv_netc_lanes {
	MV_NETC_LANE_23,
	MV_NETC_LANE_45,
};

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH		15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS		1000000UL
#define MVPP2_RX_COAL_PKTS			32
#define MVPP2_RX_COAL_USEC			100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow to have the IP header aligned on a 4 bytes
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVPP2_MH_SIZE				2
#define MVPP2_ETH_TYPE_LEN			2
#define MVPP2_PPPOE_HDR_SIZE			8
#define MVPP2_VLAN_TAG_LEN			4

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE			0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE		32
#define MVPP2_TX_CSUM_MAX_SIZE			9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC		1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC		1000

#define MVPP2_TX_MTU_MAX			0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT				16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS				4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ				8

/* Default number of TXQs in use */
#define MVPP2_DEFAULT_TXQ			1

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ			1
#define CONFIG_MV_ETH_RXQ			8	/* increment by 8 */

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD				16

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD				16

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK			64

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE			256

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE			32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN			(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP21_RX_FIFO_PORT_DATA_SIZE		0x2000
#define MVPP21_RX_FIFO_PORT_ATTR_SIZE 0x80 634 #define MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE 0x8000 635 #define MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE 0x2000 636 #define MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE 0x1000 637 #define MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE 0x200 638 #define MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE 0x80 639 #define MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE 0x40 640 #define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 641 642 /* TX general registers */ 643 #define MVPP22_TX_FIFO_SIZE_REG(eth_tx_port) (0x8860 + ((eth_tx_port) << 2)) 644 #define MVPP22_TX_FIFO_SIZE_MASK 0xf 645 646 /* TX FIFO constants */ 647 #define MVPP2_TX_FIFO_DATA_SIZE_10KB 0xa 648 #define MVPP2_TX_FIFO_DATA_SIZE_3KB 0x3 649 650 /* RX buffer constants */ 651 #define MVPP2_SKB_SHINFO_SIZE \ 652 0 653 654 #define MVPP2_RX_PKT_SIZE(mtu) \ 655 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ 656 ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE) 657 658 #define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) 659 #define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) 660 #define MVPP2_RX_MAX_PKT_SIZE(total_size) \ 661 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) 662 663 #define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) 664 665 /* IPv6 max L3 address size */ 666 #define MVPP2_MAX_L3_ADDR_SIZE 16 667 668 /* Port flags */ 669 #define MVPP2_F_LOOPBACK BIT(0) 670 671 /* Marvell tag types */ 672 enum mvpp2_tag_type { 673 MVPP2_TAG_TYPE_NONE = 0, 674 MVPP2_TAG_TYPE_MH = 1, 675 MVPP2_TAG_TYPE_DSA = 2, 676 MVPP2_TAG_TYPE_EDSA = 3, 677 MVPP2_TAG_TYPE_VLAN = 4, 678 MVPP2_TAG_TYPE_LAST = 5 679 }; 680 681 /* Parser constants */ 682 #define MVPP2_PRS_TCAM_SRAM_SIZE 256 683 #define MVPP2_PRS_TCAM_WORDS 6 684 #define MVPP2_PRS_SRAM_WORDS 4 685 #define MVPP2_PRS_FLOW_ID_SIZE 64 686 #define MVPP2_PRS_FLOW_ID_MASK 0x3f 687 #define MVPP2_PRS_TCAM_ENTRY_INVALID 1 688 #define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5) 689 #define MVPP2_PRS_IPV4_HEAD 0x40 690 #define MVPP2_PRS_IPV4_HEAD_MASK 0xf0 691 #define 
MVPP2_PRS_IPV4_MC 0xe0 692 #define MVPP2_PRS_IPV4_MC_MASK 0xf0 693 #define MVPP2_PRS_IPV4_BC_MASK 0xff 694 #define MVPP2_PRS_IPV4_IHL 0x5 695 #define MVPP2_PRS_IPV4_IHL_MASK 0xf 696 #define MVPP2_PRS_IPV6_MC 0xff 697 #define MVPP2_PRS_IPV6_MC_MASK 0xff 698 #define MVPP2_PRS_IPV6_HOP_MASK 0xff 699 #define MVPP2_PRS_TCAM_PROTO_MASK 0xff 700 #define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f 701 #define MVPP2_PRS_DBL_VLANS_MAX 100 702 703 /* Tcam structure: 704 * - lookup ID - 4 bits 705 * - port ID - 1 byte 706 * - additional information - 1 byte 707 * - header data - 8 bytes 708 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). 709 */ 710 #define MVPP2_PRS_AI_BITS 8 711 #define MVPP2_PRS_PORT_MASK 0xff 712 #define MVPP2_PRS_LU_MASK 0xf 713 #define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ 714 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) 715 #define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ 716 (((offs) * 2) - ((offs) % 2) + 2) 717 #define MVPP2_PRS_TCAM_AI_BYTE 16 718 #define MVPP2_PRS_TCAM_PORT_BYTE 17 719 #define MVPP2_PRS_TCAM_LU_BYTE 20 720 #define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) 721 #define MVPP2_PRS_TCAM_INV_WORD 5 722 /* Tcam entries ID */ 723 #define MVPP2_PE_DROP_ALL 0 724 #define MVPP2_PE_FIRST_FREE_TID 1 725 #define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31) 726 #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) 727 #define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29) 728 #define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) 729 #define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27) 730 #define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26) 731 #define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19) 732 #define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) 733 #define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) 734 #define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) 735 #define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) 736 #define 
MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) 737 #define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13) 738 #define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12) 739 #define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11) 740 #define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10) 741 #define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9) 742 #define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8) 743 #define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7) 744 #define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6) 745 #define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5) 746 #define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4) 747 #define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3) 748 #define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) 749 #define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1) 750 751 /* Sram structure 752 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0). 
753 */ 754 #define MVPP2_PRS_SRAM_RI_OFFS 0 755 #define MVPP2_PRS_SRAM_RI_WORD 0 756 #define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32 757 #define MVPP2_PRS_SRAM_RI_CTRL_WORD 1 758 #define MVPP2_PRS_SRAM_RI_CTRL_BITS 32 759 #define MVPP2_PRS_SRAM_SHIFT_OFFS 64 760 #define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72 761 #define MVPP2_PRS_SRAM_UDF_OFFS 73 762 #define MVPP2_PRS_SRAM_UDF_BITS 8 763 #define MVPP2_PRS_SRAM_UDF_MASK 0xff 764 #define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81 765 #define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82 766 #define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7 767 #define MVPP2_PRS_SRAM_UDF_TYPE_L3 1 768 #define MVPP2_PRS_SRAM_UDF_TYPE_L4 4 769 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85 770 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3 771 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1 772 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2 773 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3 774 #define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87 775 #define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2 776 #define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3 777 #define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0 778 #define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2 779 #define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3 780 #define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89 781 #define MVPP2_PRS_SRAM_AI_OFFS 90 782 #define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98 783 #define MVPP2_PRS_SRAM_AI_CTRL_BITS 8 784 #define MVPP2_PRS_SRAM_AI_MASK 0xff 785 #define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106 786 #define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf 787 #define MVPP2_PRS_SRAM_LU_DONE_BIT 110 788 #define MVPP2_PRS_SRAM_LU_GEN_BIT 111 789 790 /* Sram result info bits assignment */ 791 #define MVPP2_PRS_RI_MAC_ME_MASK 0x1 792 #define MVPP2_PRS_RI_DSA_MASK 0x2 793 #define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3)) 794 #define MVPP2_PRS_RI_VLAN_NONE 0x0 795 #define MVPP2_PRS_RI_VLAN_SINGLE BIT(2) 796 #define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3) 797 #define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3)) 798 #define MVPP2_PRS_RI_CPU_CODE_MASK 0x70 799 #define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4) 800 #define 
MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10)) 801 #define MVPP2_PRS_RI_L2_UCAST 0x0 802 #define MVPP2_PRS_RI_L2_MCAST BIT(9) 803 #define MVPP2_PRS_RI_L2_BCAST BIT(10) 804 #define MVPP2_PRS_RI_PPPOE_MASK 0x800 805 #define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14)) 806 #define MVPP2_PRS_RI_L3_UN 0x0 807 #define MVPP2_PRS_RI_L3_IP4 BIT(12) 808 #define MVPP2_PRS_RI_L3_IP4_OPT BIT(13) 809 #define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13)) 810 #define MVPP2_PRS_RI_L3_IP6 BIT(14) 811 #define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14)) 812 #define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14)) 813 #define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16)) 814 #define MVPP2_PRS_RI_L3_UCAST 0x0 815 #define MVPP2_PRS_RI_L3_MCAST BIT(15) 816 #define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) 817 #define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 818 #define MVPP2_PRS_RI_UDF3_MASK 0x300000 819 #define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) 820 #define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 821 #define MVPP2_PRS_RI_L4_TCP BIT(22) 822 #define MVPP2_PRS_RI_L4_UDP BIT(23) 823 #define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23)) 824 #define MVPP2_PRS_RI_UDF7_MASK 0x60000000 825 #define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29) 826 #define MVPP2_PRS_RI_DROP_MASK 0x80000000 827 828 /* Sram additional info bits assignment */ 829 #define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0) 830 #define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0) 831 #define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1) 832 #define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2) 833 #define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3) 834 #define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4) 835 #define MVPP2_PRS_SINGLE_VLAN_AI 0 836 #define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7) 837 838 /* DSA/EDSA type */ 839 #define MVPP2_PRS_TAGGED true 840 #define MVPP2_PRS_UNTAGGED false 841 #define MVPP2_PRS_EDSA true 842 #define MVPP2_PRS_DSA false 843 844 /* MAC entries, shadow udf */ 845 enum mvpp2_prs_udf { 846 MVPP2_PRS_UDF_MAC_DEF, 847 MVPP2_PRS_UDF_MAC_RANGE, 848 MVPP2_PRS_UDF_L2_DEF, 849 
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64

/* BM constants */
#define MVPP2_BM_POOLS_NUM		1
#define MVPP2_BM_LONG_BUF_NUM		16
#define MVPP2_BM_SHORT_BUF_NUM		16
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	0

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

/* BM short pool packet size
 * These value assure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;
	void __iomem *iface_base;
	void __iomem *mdio_base;

	void __iomem *mpcs_base;
	void __iomem *xpcs_base;
	void __iomem *rfu1_base;

	u32 netc_config;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct
mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	struct mii_dev *bus;

	int probe_done;
};

/* Per-CPU software RX/TX byte and packet counters */
struct mvpp2_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-port state and resources */
struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;

	int pkt_size;

	u32 pending_cause_rx;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	int phy_node;
	int phyaddr;
#ifdef CONFIG_DM_GPIO
	struct gpio_desc phy_reset_gpio;
	struct gpio_desc phy_tx_disable_gpio;
#endif
	int init;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	unsigned int phy_speed;		/* SGMII 1Gbps vs 2.5Gbps */

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	u8 dev_addr[ETH_ALEN];
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define
MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future
use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC) */
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

/* TCAM part of a parser entry, accessible as words or as bytes */
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

/* SRAM part of a parser entry, accessible as words or as bytes */
union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

/* SW image of one parser TCAM+SRAM entry */
struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

/* SW bookkeeping for one parser HW entry */
struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

/* Buffer manager (BM) pool bookkeeping */
struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	unsigned long *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_DEFAULT_TXQ;

static int base_id;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/*
 * U-Boot internal data, mostly uncached buffers for descriptors and data
 */
struct buffer_location {
	struct mvpp2_tx_desc *aggr_tx_descs;
	struct mvpp2_tx_desc *tx_descs;
	struct mvpp2_rx_desc *rx_descs;
	unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
	unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
	int first_rxq;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). driver uses less bd's so use 1MB bdspace.
 */
#define BD_SPACE	(1 << 20)

/* Utility/helper methods */

/* Write a 32-bit value to a register in the shared register space */
static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

/* Read a 32-bit value from a register in the shared register space */
static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}

/* Set the buffer DMA address in a TX descriptor (PPv2.1/PPv2.2 layouts) */
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		/* PPv2.2 keeps the DMA address in bits [40:0] of
		 * buf_dma_addr_ptp; preserve the upper bits.
		 */
		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}

/* Set the data size in a TX descriptor */
static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

/* Set the destination physical TX queue in a TX descriptor */
static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

/* Set the command word of a TX descriptor */
static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

/* Set the packet offset (from buffer start) in a TX descriptor */
static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}

/* Get the buffer DMA address from an RX descriptor */
static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return
rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

/* Get the SW cookie from an RX descriptor (PPv2.1/PPv2.2 layouts) */
static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

/* Get the received data size from an RX descriptor */
static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

/* Get the status word from an RX descriptor */
static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

/* Advance the per-CPU TXQ "get" index, wrapping at the ring size */
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	/* Check the invalidation bit first; skip the full read if invalid */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	/* Port bits are active-low in the enable byte: cleared = matched */
	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}

/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void
mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
			     unsigned int offs, unsigned char *byte,
			     unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Set ethertype in tcam sw entry (big-endian: high byte first) */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	/* For each masked bit, set/clear the RI bit and mark its ctrl bit */
	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	/* The AI field straddles two sram bytes; reassemble it */
	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}

/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* In the sram
sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value; the UDF field straddles a byte boundary, so fix up
	 * the spill-over bits in the following sram byte by hand
	 */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation; same straddling fix-up as for the value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int
flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	/* Caller owns (and must kfree) the returned entry */
	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram store classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}

/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exist - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT,
					1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exist - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ?
0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exist - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &=
~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}

/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}

/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}

/* Set default entries (place holder) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index
= MVPP2_PE_MAC_NON_PROMISCUOUS; 1976 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); 1977 1978 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, 1979 MVPP2_PRS_RI_DROP_MASK); 1980 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 1981 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 1982 1983 /* Unmask all ports */ 1984 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 1985 1986 /* Update shadow table and hw entry */ 1987 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 1988 mvpp2_prs_hw_write(priv, &pe); 1989 1990 /* place holders only - no ports */ 1991 mvpp2_prs_mac_drop_all_set(priv, 0, false); 1992 mvpp2_prs_mac_promisc_set(priv, 0, false); 1993 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false); 1994 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false); 1995 } 1996 1997 /* Match basic ethertypes */ 1998 static int mvpp2_prs_etype_init(struct mvpp2 *priv) 1999 { 2000 struct mvpp2_prs_entry pe; 2001 int tid; 2002 2003 /* Ethertype: PPPoE */ 2004 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2005 MVPP2_PE_LAST_FREE_TID); 2006 if (tid < 0) 2007 return tid; 2008 2009 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 2010 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2011 pe.index = tid; 2012 2013 mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES); 2014 2015 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE, 2016 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2017 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE); 2018 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK, 2019 MVPP2_PRS_RI_PPPOE_MASK); 2020 2021 /* Update shadow table and hw entry */ 2022 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2023 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2024 priv->prs_shadow[pe.index].finish = false; 2025 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK, 2026 MVPP2_PRS_RI_PPPOE_MASK); 2027 mvpp2_prs_hw_write(priv, &pe); 2028 2029 /* Ethertype: ARP */ 2030 tid = mvpp2_prs_tcam_first_free(priv, 
MVPP2_PE_FIRST_FREE_TID, 2031 MVPP2_PE_LAST_FREE_TID); 2032 if (tid < 0) 2033 return tid; 2034 2035 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 2036 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2037 pe.index = tid; 2038 2039 mvpp2_prs_match_etype(&pe, 0, PROT_ARP); 2040 2041 /* Generate flow in the next iteration*/ 2042 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 2043 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2044 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP, 2045 MVPP2_PRS_RI_L3_PROTO_MASK); 2046 /* Set L3 offset */ 2047 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2048 MVPP2_ETH_TYPE_LEN, 2049 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2050 2051 /* Update shadow table and hw entry */ 2052 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2053 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2054 priv->prs_shadow[pe.index].finish = true; 2055 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP, 2056 MVPP2_PRS_RI_L3_PROTO_MASK); 2057 mvpp2_prs_hw_write(priv, &pe); 2058 2059 /* Ethertype: LBTD */ 2060 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2061 MVPP2_PE_LAST_FREE_TID); 2062 if (tid < 0) 2063 return tid; 2064 2065 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 2066 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2067 pe.index = tid; 2068 2069 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE); 2070 2071 /* Generate flow in the next iteration*/ 2072 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 2073 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2074 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | 2075 MVPP2_PRS_RI_UDF3_RX_SPECIAL, 2076 MVPP2_PRS_RI_CPU_CODE_MASK | 2077 MVPP2_PRS_RI_UDF3_MASK); 2078 /* Set L3 offset */ 2079 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2080 MVPP2_ETH_TYPE_LEN, 2081 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2082 2083 /* Update shadow table and hw entry */ 2084 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2085 
priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2086 priv->prs_shadow[pe.index].finish = true; 2087 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | 2088 MVPP2_PRS_RI_UDF3_RX_SPECIAL, 2089 MVPP2_PRS_RI_CPU_CODE_MASK | 2090 MVPP2_PRS_RI_UDF3_MASK); 2091 mvpp2_prs_hw_write(priv, &pe); 2092 2093 /* Ethertype: IPv4 without options */ 2094 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2095 MVPP2_PE_LAST_FREE_TID); 2096 if (tid < 0) 2097 return tid; 2098 2099 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 2100 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2101 pe.index = tid; 2102 2103 mvpp2_prs_match_etype(&pe, 0, PROT_IP); 2104 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, 2105 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, 2106 MVPP2_PRS_IPV4_HEAD_MASK | 2107 MVPP2_PRS_IPV4_IHL_MASK); 2108 2109 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); 2110 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, 2111 MVPP2_PRS_RI_L3_PROTO_MASK); 2112 /* Skip eth_type + 4 bytes of IP header */ 2113 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, 2114 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2115 /* Set L3 offset */ 2116 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2117 MVPP2_ETH_TYPE_LEN, 2118 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2119 2120 /* Update shadow table and hw entry */ 2121 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2122 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2123 priv->prs_shadow[pe.index].finish = false; 2124 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, 2125 MVPP2_PRS_RI_L3_PROTO_MASK); 2126 mvpp2_prs_hw_write(priv, &pe); 2127 2128 /* Ethertype: IPv4 with options */ 2129 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2130 MVPP2_PE_LAST_FREE_TID); 2131 if (tid < 0) 2132 return tid; 2133 2134 pe.index = tid; 2135 2136 /* Clear tcam data before updating */ 2137 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; 2138 
pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; 2139 2140 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, 2141 MVPP2_PRS_IPV4_HEAD, 2142 MVPP2_PRS_IPV4_HEAD_MASK); 2143 2144 /* Clear ri before updating */ 2145 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; 2146 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; 2147 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, 2148 MVPP2_PRS_RI_L3_PROTO_MASK); 2149 2150 /* Update shadow table and hw entry */ 2151 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2152 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2153 priv->prs_shadow[pe.index].finish = false; 2154 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT, 2155 MVPP2_PRS_RI_L3_PROTO_MASK); 2156 mvpp2_prs_hw_write(priv, &pe); 2157 2158 /* Ethertype: IPv6 without options */ 2159 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2160 MVPP2_PE_LAST_FREE_TID); 2161 if (tid < 0) 2162 return tid; 2163 2164 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 2165 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2166 pe.index = tid; 2167 2168 mvpp2_prs_match_etype(&pe, 0, PROT_IPV6); 2169 2170 /* Skip DIP of IPV6 header */ 2171 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + 2172 MVPP2_MAX_L3_ADDR_SIZE, 2173 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2174 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); 2175 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, 2176 MVPP2_PRS_RI_L3_PROTO_MASK); 2177 /* Set L3 offset */ 2178 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2179 MVPP2_ETH_TYPE_LEN, 2180 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2181 2182 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2183 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2184 priv->prs_shadow[pe.index].finish = false; 2185 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6, 2186 MVPP2_PRS_RI_L3_PROTO_MASK); 2187 mvpp2_prs_hw_write(priv, &pe); 2188 2189 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */ 2190 
memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 2191 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2192 pe.index = MVPP2_PE_ETH_TYPE_UN; 2193 2194 /* Unmask all ports */ 2195 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 2196 2197 /* Generate flow in the next iteration*/ 2198 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2199 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 2200 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, 2201 MVPP2_PRS_RI_L3_PROTO_MASK); 2202 /* Set L3 offset even it's unknown L3 */ 2203 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2204 MVPP2_ETH_TYPE_LEN, 2205 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2206 2207 /* Update shadow table and hw entry */ 2208 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2209 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2210 priv->prs_shadow[pe.index].finish = true; 2211 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN, 2212 MVPP2_PRS_RI_L3_PROTO_MASK); 2213 mvpp2_prs_hw_write(priv, &pe); 2214 2215 return 0; 2216 } 2217 2218 /* Parser default initialization */ 2219 static int mvpp2_prs_default_init(struct udevice *dev, 2220 struct mvpp2 *priv) 2221 { 2222 int err, index, i; 2223 2224 /* Enable tcam table */ 2225 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); 2226 2227 /* Clear all tcam and sram entries */ 2228 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) { 2229 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); 2230 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) 2231 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0); 2232 2233 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index); 2234 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) 2235 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0); 2236 } 2237 2238 /* Invalidate all tcam entries */ 2239 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) 2240 mvpp2_prs_hw_inv(priv, index); 2241 2242 priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE, 2243 sizeof(struct 
mvpp2_prs_shadow), 2244 GFP_KERNEL); 2245 if (!priv->prs_shadow) 2246 return -ENOMEM; 2247 2248 /* Always start from lookup = 0 */ 2249 for (index = 0; index < MVPP2_MAX_PORTS; index++) 2250 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, 2251 MVPP2_PRS_PORT_LU_MAX, 0); 2252 2253 mvpp2_prs_def_flow_init(priv); 2254 2255 mvpp2_prs_mh_init(priv); 2256 2257 mvpp2_prs_mac_init(priv); 2258 2259 err = mvpp2_prs_etype_init(priv); 2260 if (err) 2261 return err; 2262 2263 return 0; 2264 } 2265 2266 /* Compare MAC DA with tcam entry data */ 2267 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, 2268 const u8 *da, unsigned char *mask) 2269 { 2270 unsigned char tcam_byte, tcam_mask; 2271 int index; 2272 2273 for (index = 0; index < ETH_ALEN; index++) { 2274 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask); 2275 if (tcam_mask != mask[index]) 2276 return false; 2277 2278 if ((tcam_mask & tcam_byte) != (da[index] & mask[index])) 2279 return false; 2280 } 2281 2282 return true; 2283 } 2284 2285 /* Find tcam entry with matched pair <MAC DA, port> */ 2286 static struct mvpp2_prs_entry * 2287 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, 2288 unsigned char *mask, int udf_type) 2289 { 2290 struct mvpp2_prs_entry *pe; 2291 int tid; 2292 2293 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2294 if (!pe) 2295 return NULL; 2296 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 2297 2298 /* Go through the all entires with MVPP2_PRS_LU_MAC */ 2299 for (tid = MVPP2_PE_FIRST_FREE_TID; 2300 tid <= MVPP2_PE_LAST_FREE_TID; tid++) { 2301 unsigned int entry_pmap; 2302 2303 if (!priv->prs_shadow[tid].valid || 2304 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || 2305 (priv->prs_shadow[tid].udf != udf_type)) 2306 continue; 2307 2308 pe->index = tid; 2309 mvpp2_prs_hw_read(priv, pe); 2310 entry_pmap = mvpp2_prs_tcam_port_map_get(pe); 2311 2312 if (mvpp2_prs_mac_range_equals(pe, da, mask) && 2313 entry_pmap == pmap) 2314 return pe; 2315 } 2316 
kfree(pe); 2317 2318 return NULL; 2319 } 2320 2321 /* Update parser's mac da entry */ 2322 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port, 2323 const u8 *da, bool add) 2324 { 2325 struct mvpp2_prs_entry *pe; 2326 unsigned int pmap, len, ri; 2327 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 2328 int tid; 2329 2330 /* Scan TCAM and see if entry with this <MAC DA, port> already exist */ 2331 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask, 2332 MVPP2_PRS_UDF_MAC_DEF); 2333 2334 /* No such entry */ 2335 if (!pe) { 2336 if (!add) 2337 return 0; 2338 2339 /* Create new TCAM entry */ 2340 /* Find first range mac entry*/ 2341 for (tid = MVPP2_PE_FIRST_FREE_TID; 2342 tid <= MVPP2_PE_LAST_FREE_TID; tid++) 2343 if (priv->prs_shadow[tid].valid && 2344 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) && 2345 (priv->prs_shadow[tid].udf == 2346 MVPP2_PRS_UDF_MAC_RANGE)) 2347 break; 2348 2349 /* Go through the all entries from first to last */ 2350 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2351 tid - 1); 2352 if (tid < 0) 2353 return tid; 2354 2355 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2356 if (!pe) 2357 return -1; 2358 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 2359 pe->index = tid; 2360 2361 /* Mask all ports */ 2362 mvpp2_prs_tcam_port_map_set(pe, 0); 2363 } 2364 2365 /* Update port mask */ 2366 mvpp2_prs_tcam_port_set(pe, port, add); 2367 2368 /* Invalidate the entry if no ports are left enabled */ 2369 pmap = mvpp2_prs_tcam_port_map_get(pe); 2370 if (pmap == 0) { 2371 if (add) { 2372 kfree(pe); 2373 return -1; 2374 } 2375 mvpp2_prs_hw_inv(priv, pe->index); 2376 priv->prs_shadow[pe->index].valid = false; 2377 kfree(pe); 2378 return 0; 2379 } 2380 2381 /* Continue - set next lookup */ 2382 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA); 2383 2384 /* Set match on DA */ 2385 len = ETH_ALEN; 2386 while (len--) 2387 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff); 2388 2389 /* Set result info bits 
*/ 2390 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK; 2391 2392 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | 2393 MVPP2_PRS_RI_MAC_ME_MASK); 2394 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK | 2395 MVPP2_PRS_RI_MAC_ME_MASK); 2396 2397 /* Shift to ethertype */ 2398 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN, 2399 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2400 2401 /* Update shadow table and hw entry */ 2402 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF; 2403 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC); 2404 mvpp2_prs_hw_write(priv, pe); 2405 2406 kfree(pe); 2407 2408 return 0; 2409 } 2410 2411 static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da) 2412 { 2413 int err; 2414 2415 /* Remove old parser entry */ 2416 err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr, 2417 false); 2418 if (err) 2419 return err; 2420 2421 /* Add new parser entry */ 2422 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true); 2423 if (err) 2424 return err; 2425 2426 /* Set addr in the device */ 2427 memcpy(port->dev_addr, da, ETH_ALEN); 2428 2429 return 0; 2430 } 2431 2432 /* Set prs flow for the port */ 2433 static int mvpp2_prs_def_flow(struct mvpp2_port *port) 2434 { 2435 struct mvpp2_prs_entry *pe; 2436 int tid; 2437 2438 pe = mvpp2_prs_flow_find(port->priv, port->id); 2439 2440 /* Such entry not exist */ 2441 if (!pe) { 2442 /* Go through the all entires from last to first */ 2443 tid = mvpp2_prs_tcam_first_free(port->priv, 2444 MVPP2_PE_LAST_FREE_TID, 2445 MVPP2_PE_FIRST_FREE_TID); 2446 if (tid < 0) 2447 return tid; 2448 2449 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2450 if (!pe) 2451 return -ENOMEM; 2452 2453 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); 2454 pe->index = tid; 2455 2456 /* Set flow ID*/ 2457 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK); 2458 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); 2459 2460 /* Update shadow table */ 2461 
mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS); 2462 } 2463 2464 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id)); 2465 mvpp2_prs_hw_write(port->priv, pe); 2466 kfree(pe); 2467 2468 return 0; 2469 } 2470 2471 /* Classifier configuration routines */ 2472 2473 /* Update classification flow table registers */ 2474 static void mvpp2_cls_flow_write(struct mvpp2 *priv, 2475 struct mvpp2_cls_flow_entry *fe) 2476 { 2477 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); 2478 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); 2479 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); 2480 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); 2481 } 2482 2483 /* Update classification lookup table register */ 2484 static void mvpp2_cls_lookup_write(struct mvpp2 *priv, 2485 struct mvpp2_cls_lookup_entry *le) 2486 { 2487 u32 val; 2488 2489 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid; 2490 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); 2491 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); 2492 } 2493 2494 /* Classifier default initialization */ 2495 static void mvpp2_cls_init(struct mvpp2 *priv) 2496 { 2497 struct mvpp2_cls_lookup_entry le; 2498 struct mvpp2_cls_flow_entry fe; 2499 int index; 2500 2501 /* Enable classifier */ 2502 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); 2503 2504 /* Clear classifier flow table */ 2505 memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS); 2506 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { 2507 fe.index = index; 2508 mvpp2_cls_flow_write(priv, &fe); 2509 } 2510 2511 /* Clear classifier lookup table */ 2512 le.data = 0; 2513 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) { 2514 le.lkpid = index; 2515 le.way = 0; 2516 mvpp2_cls_lookup_write(priv, &le); 2517 2518 le.way = 1; 2519 mvpp2_cls_lookup_write(priv, &le); 2520 } 2521 } 2522 2523 static void mvpp2_cls_port_config(struct mvpp2_port *port) 2524 { 2525 struct mvpp2_cls_lookup_entry le; 2526 
u32 val; 2527 2528 /* Set way for the port */ 2529 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG); 2530 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id); 2531 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val); 2532 2533 /* Pick the entry to be accessed in lookup ID decoding table 2534 * according to the way and lkpid. 2535 */ 2536 le.lkpid = port->id; 2537 le.way = 0; 2538 le.data = 0; 2539 2540 /* Set initial CPU queue for receiving packets */ 2541 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK; 2542 le.data |= port->first_rxq; 2543 2544 /* Disable classification engines */ 2545 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; 2546 2547 /* Update lookup ID table entry */ 2548 mvpp2_cls_lookup_write(port->priv, &le); 2549 } 2550 2551 /* Set CPU queue number for oversize packets */ 2552 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) 2553 { 2554 u32 val; 2555 2556 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id), 2557 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK); 2558 2559 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id), 2560 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); 2561 2562 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); 2563 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); 2564 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); 2565 } 2566 2567 /* Buffer Manager configuration routines */ 2568 2569 /* Create pool */ 2570 static int mvpp2_bm_pool_create(struct udevice *dev, 2571 struct mvpp2 *priv, 2572 struct mvpp2_bm_pool *bm_pool, int size) 2573 { 2574 u32 val; 2575 2576 /* Number of buffer pointers must be a multiple of 16, as per 2577 * hardware constraints 2578 */ 2579 if (!IS_ALIGNED(size, 16)) 2580 return -EINVAL; 2581 2582 bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id]; 2583 bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id]; 2584 if (!bm_pool->virt_addr) 2585 return -ENOMEM; 2586 2587 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr, 2588 MVPP2_BM_POOL_PTR_ALIGN)) { 2589 
dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", 2590 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN); 2591 return -ENOMEM; 2592 } 2593 2594 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), 2595 lower_32_bits(bm_pool->dma_addr)); 2596 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size); 2597 2598 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); 2599 val |= MVPP2_BM_START_MASK; 2600 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); 2601 2602 bm_pool->type = MVPP2_BM_FREE; 2603 bm_pool->size = size; 2604 bm_pool->pkt_size = 0; 2605 bm_pool->buf_num = 0; 2606 2607 return 0; 2608 } 2609 2610 /* Set pool buffer size */ 2611 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, 2612 struct mvpp2_bm_pool *bm_pool, 2613 int buf_size) 2614 { 2615 u32 val; 2616 2617 bm_pool->buf_size = buf_size; 2618 2619 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET); 2620 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val); 2621 } 2622 2623 /* Free all buffers from the pool */ 2624 static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv, 2625 struct mvpp2_bm_pool *bm_pool) 2626 { 2627 int i; 2628 2629 for (i = 0; i < bm_pool->buf_num; i++) { 2630 /* Allocate buffer back from the buffer manager */ 2631 mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); 2632 } 2633 2634 bm_pool->buf_num = 0; 2635 } 2636 2637 /* Cleanup pool */ 2638 static int mvpp2_bm_pool_destroy(struct udevice *dev, 2639 struct mvpp2 *priv, 2640 struct mvpp2_bm_pool *bm_pool) 2641 { 2642 u32 val; 2643 2644 mvpp2_bm_bufs_free(dev, priv, bm_pool); 2645 if (bm_pool->buf_num) { 2646 dev_err(dev, "cannot free all buffers in pool %d\n", bm_pool->id); 2647 return 0; 2648 } 2649 2650 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); 2651 val |= MVPP2_BM_STOP_MASK; 2652 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); 2653 2654 return 0; 2655 } 2656 2657 static int mvpp2_bm_pools_init(struct udevice *dev, 2658 struct mvpp2 *priv) 
2659 { 2660 int i, err, size; 2661 struct mvpp2_bm_pool *bm_pool; 2662 2663 /* Create all pools with maximum size */ 2664 size = MVPP2_BM_POOL_SIZE_MAX; 2665 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { 2666 bm_pool = &priv->bm_pools[i]; 2667 bm_pool->id = i; 2668 err = mvpp2_bm_pool_create(dev, priv, bm_pool, size); 2669 if (err) 2670 goto err_unroll_pools; 2671 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0); 2672 } 2673 return 0; 2674 2675 err_unroll_pools: 2676 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size); 2677 for (i = i - 1; i >= 0; i--) 2678 mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]); 2679 return err; 2680 } 2681 2682 static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv) 2683 { 2684 int i, err; 2685 2686 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { 2687 /* Mask BM all interrupts */ 2688 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0); 2689 /* Clear BM cause register */ 2690 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0); 2691 } 2692 2693 /* Allocate and initialize BM pools */ 2694 priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM, 2695 sizeof(struct mvpp2_bm_pool), GFP_KERNEL); 2696 if (!priv->bm_pools) 2697 return -ENOMEM; 2698 2699 err = mvpp2_bm_pools_init(dev, priv); 2700 if (err < 0) 2701 return err; 2702 return 0; 2703 } 2704 2705 /* Attach long pool to rxq */ 2706 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, 2707 int lrxq, int long_pool) 2708 { 2709 u32 val, mask; 2710 int prxq; 2711 2712 /* Get queue physical ID */ 2713 prxq = port->rxqs[lrxq]->id; 2714 2715 if (port->priv->hw_version == MVPP21) 2716 mask = MVPP21_RXQ_POOL_LONG_MASK; 2717 else 2718 mask = MVPP22_RXQ_POOL_LONG_MASK; 2719 2720 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 2721 val &= ~mask; 2722 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask; 2723 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 2724 } 2725 2726 /* Set pool number in a BM cookie */ 2727 static inline u32 
mvpp2_bm_cookie_pool_set(u32 cookie, int pool) 2728 { 2729 u32 bm; 2730 2731 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS); 2732 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS); 2733 2734 return bm; 2735 } 2736 2737 /* Get pool number from a BM cookie */ 2738 static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie) 2739 { 2740 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF; 2741 } 2742 2743 /* Release buffer to BM */ 2744 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, 2745 dma_addr_t buf_dma_addr, 2746 unsigned long buf_phys_addr) 2747 { 2748 if (port->priv->hw_version == MVPP22) { 2749 u32 val = 0; 2750 2751 if (sizeof(dma_addr_t) == 8) 2752 val |= upper_32_bits(buf_dma_addr) & 2753 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK; 2754 2755 if (sizeof(phys_addr_t) == 8) 2756 val |= (upper_32_bits(buf_phys_addr) 2757 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & 2758 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK; 2759 2760 mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val); 2761 } 2762 2763 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply 2764 * returned in the "cookie" field of the RX 2765 * descriptor. 
Instead of storing the virtual address, we 2766 * store the physical address 2767 */ 2768 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); 2769 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); 2770 } 2771 2772 /* Refill BM pool */ 2773 static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, 2774 dma_addr_t dma_addr, 2775 phys_addr_t phys_addr) 2776 { 2777 int pool = mvpp2_bm_cookie_pool_get(bm); 2778 2779 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); 2780 } 2781 2782 /* Allocate buffers for the pool */ 2783 static int mvpp2_bm_bufs_add(struct mvpp2_port *port, 2784 struct mvpp2_bm_pool *bm_pool, int buf_num) 2785 { 2786 int i; 2787 2788 if (buf_num < 0 || 2789 (buf_num + bm_pool->buf_num > bm_pool->size)) { 2790 netdev_err(port->dev, 2791 "cannot allocate %d buffers for pool %d\n", 2792 buf_num, bm_pool->id); 2793 return 0; 2794 } 2795 2796 for (i = 0; i < buf_num; i++) { 2797 mvpp2_bm_pool_put(port, bm_pool->id, 2798 (dma_addr_t)buffer_loc.rx_buffer[i], 2799 (unsigned long)buffer_loc.rx_buffer[i]); 2800 2801 } 2802 2803 /* Update BM driver with number of buffers added to pool */ 2804 bm_pool->buf_num += i; 2805 2806 return i; 2807 } 2808 2809 /* Notify the driver that BM pool is being used as specific type and return the 2810 * pool pointer on success 2811 */ 2812 static struct mvpp2_bm_pool * 2813 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, 2814 int pkt_size) 2815 { 2816 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; 2817 int num; 2818 2819 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) { 2820 netdev_err(port->dev, "mixing pool types is forbidden\n"); 2821 return NULL; 2822 } 2823 2824 if (new_pool->type == MVPP2_BM_FREE) 2825 new_pool->type = type; 2826 2827 /* Allocate buffers in case BM pool is used as long pool, but packet 2828 * size doesn't match MTU or BM pool hasn't being used yet 2829 */ 2830 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > 
new_pool->pkt_size)) || 2831 (new_pool->pkt_size == 0)) { 2832 int pkts_num; 2833 2834 /* Set default buffer number or free all the buffers in case 2835 * the pool is not empty 2836 */ 2837 pkts_num = new_pool->buf_num; 2838 if (pkts_num == 0) 2839 pkts_num = type == MVPP2_BM_SWF_LONG ? 2840 MVPP2_BM_LONG_BUF_NUM : 2841 MVPP2_BM_SHORT_BUF_NUM; 2842 else 2843 mvpp2_bm_bufs_free(NULL, 2844 port->priv, new_pool); 2845 2846 new_pool->pkt_size = pkt_size; 2847 2848 /* Allocate buffers for this pool */ 2849 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); 2850 if (num != pkts_num) { 2851 dev_err(dev, "pool %d: %d of %d allocated\n", 2852 new_pool->id, num, pkts_num); 2853 return NULL; 2854 } 2855 } 2856 2857 mvpp2_bm_pool_bufsize_set(port->priv, new_pool, 2858 MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); 2859 2860 return new_pool; 2861 } 2862 2863 /* Initialize pools for swf */ 2864 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) 2865 { 2866 int rxq; 2867 2868 if (!port->pool_long) { 2869 port->pool_long = 2870 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id), 2871 MVPP2_BM_SWF_LONG, 2872 port->pkt_size); 2873 if (!port->pool_long) 2874 return -ENOMEM; 2875 2876 port->pool_long->port_map |= (1 << port->id); 2877 2878 for (rxq = 0; rxq < rxq_number; rxq++) 2879 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); 2880 } 2881 2882 return 0; 2883 } 2884 2885 /* Port configuration routines */ 2886 2887 static void mvpp2_port_mii_set(struct mvpp2_port *port) 2888 { 2889 u32 val; 2890 2891 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); 2892 2893 switch (port->phy_interface) { 2894 case PHY_INTERFACE_MODE_SGMII: 2895 val |= MVPP2_GMAC_INBAND_AN_MASK; 2896 break; 2897 case PHY_INTERFACE_MODE_RGMII: 2898 case PHY_INTERFACE_MODE_RGMII_ID: 2899 val |= MVPP2_GMAC_PORT_RGMII_MASK; 2900 default: 2901 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; 2902 } 2903 2904 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 2905 } 2906 2907 static void mvpp2_port_fc_adv_enable(struct 
mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* De-assert the port reset and busy-wait until the MAC reports it has
 * left the reset state.
 */
static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		    ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	/* NOTE(review): no timeout here - hangs forever if the MAC never
	 * clears the reset bit.
	 */
	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	/* Register field holds the size in units of 2 bytes, MH excluded */
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* PPv2.2 GoP/GMAC config */

/* Set the MAC to reset or exit from reset */
static int gop_gmac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (reset)
		val |= MVPP2_GMAC_PORT_RESET_MASK;
	else
		val &= ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

/*
 * gop_gpcs_mode_cfg
 *
 * Configure port to working with Gig PCS or don't.
 */
static int gop_gpcs_mode_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_PCS_ENABLE_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	/* enable / disable PCS on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

static int gop_bypass_clk_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	else
		val &= ~MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	/* enable / disable 125MHz clock bypass on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

static void gop_gmac_sgmii2_5_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII2_5_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val |= MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	val |= MVPP2_GMAC_PORT_DIS_PADING_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/*
	 * Configure GIG MAC to 1000Base-X mode connected to a fiber
	 * transceiver
	 */
	val |= MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN 0x9268 */
	val = MVPP2_GMAC_EN_PCS_AN |
		MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_CONFIG_MII_SPEED |
		MVPP2_GMAC_CONFIG_GMII_SPEED |
		MVPP2_GMAC_FC_ADV_EN |
		MVPP2_GMAC_CONFIG_FULL_DUPLEX |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void gop_gmac_sgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/*
configure DP clock select according to mode */ 3107 val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK; 3108 /* configure QSGMII bypass according to mode */ 3109 val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK; 3110 writel(val, port->base + MVPP2_GMAC_CTRL_4_REG); 3111 3112 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); 3113 val |= MVPP2_GMAC_PORT_DIS_PADING_MASK; 3114 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 3115 3116 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 3117 /* configure GIG MAC to SGMII mode */ 3118 val &= ~MVPP2_GMAC_PORT_TYPE_MASK; 3119 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); 3120 3121 /* configure AN */ 3122 val = MVPP2_GMAC_EN_PCS_AN | 3123 MVPP2_GMAC_AN_BYPASS_EN | 3124 MVPP2_GMAC_AN_SPEED_EN | 3125 MVPP2_GMAC_EN_FC_AN | 3126 MVPP2_GMAC_AN_DUPLEX_EN | 3127 MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG; 3128 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 3129 } 3130 3131 static void gop_gmac_rgmii_cfg(struct mvpp2_port *port) 3132 { 3133 u32 val, thresh; 3134 3135 /* 3136 * Configure minimal level of the Tx FIFO before the lower part 3137 * starts to read a packet 3138 */ 3139 thresh = MVPP2_RGMII_TX_FIFO_MIN_TH; 3140 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 3141 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; 3142 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh); 3143 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 3144 3145 /* Disable bypass of sync module */ 3146 val = readl(port->base + MVPP2_GMAC_CTRL_4_REG); 3147 val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK; 3148 /* configure DP clock select according to mode */ 3149 val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK; 3150 val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK; 3151 val |= MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK; 3152 writel(val, port->base + MVPP2_GMAC_CTRL_4_REG); 3153 3154 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); 3155 val &= ~MVPP2_GMAC_PORT_DIS_PADING_MASK; 3156 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 3157 3158 val = readl(port->base + 
MVPP2_GMAC_CTRL_0_REG); 3159 /* configure GIG MAC to SGMII mode */ 3160 val &= ~MVPP2_GMAC_PORT_TYPE_MASK; 3161 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); 3162 3163 /* configure AN 0xb8e8 */ 3164 val = MVPP2_GMAC_AN_BYPASS_EN | 3165 MVPP2_GMAC_AN_SPEED_EN | 3166 MVPP2_GMAC_EN_FC_AN | 3167 MVPP2_GMAC_AN_DUPLEX_EN | 3168 MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG; 3169 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 3170 } 3171 3172 /* Set the internal mux's to the required MAC in the GOP */ 3173 static int gop_gmac_mode_cfg(struct mvpp2_port *port) 3174 { 3175 u32 val; 3176 3177 /* Set TX FIFO thresholds */ 3178 switch (port->phy_interface) { 3179 case PHY_INTERFACE_MODE_SGMII: 3180 if (port->phy_speed == 2500) 3181 gop_gmac_sgmii2_5_cfg(port); 3182 else 3183 gop_gmac_sgmii_cfg(port); 3184 break; 3185 3186 case PHY_INTERFACE_MODE_RGMII: 3187 case PHY_INTERFACE_MODE_RGMII_ID: 3188 gop_gmac_rgmii_cfg(port); 3189 break; 3190 3191 default: 3192 return -1; 3193 } 3194 3195 /* Jumbo frame support - 0x1400*2= 0x2800 bytes */ 3196 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 3197 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; 3198 val |= 0x1400 << MVPP2_GMAC_MAX_RX_SIZE_OFFS; 3199 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); 3200 3201 /* PeriodicXonEn disable */ 3202 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG); 3203 val &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK; 3204 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); 3205 3206 return 0; 3207 } 3208 3209 static void gop_xlg_2_gig_mac_cfg(struct mvpp2_port *port) 3210 { 3211 u32 val; 3212 3213 /* relevant only for MAC0 (XLG0 and GMAC0) */ 3214 if (port->gop_id > 0) 3215 return; 3216 3217 /* configure 1Gig MAC mode */ 3218 val = readl(port->base + MVPP22_XLG_CTRL3_REG); 3219 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; 3220 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; 3221 writel(val, port->base + MVPP22_XLG_CTRL3_REG); 3222 } 3223 3224 static int gop_gpcs_reset(struct mvpp2_port *port, int reset) 3225 { 3226 u32 val; 3227 3228 
val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); 3229 if (reset) 3230 val &= ~MVPP2_GMAC_SGMII_MODE_MASK; 3231 else 3232 val |= MVPP2_GMAC_SGMII_MODE_MASK; 3233 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 3234 3235 return 0; 3236 } 3237 3238 /* Set the internal mux's to the required PCS in the PI */ 3239 static int gop_xpcs_mode(struct mvpp2_port *port, int num_of_lanes) 3240 { 3241 u32 val; 3242 int lane; 3243 3244 switch (num_of_lanes) { 3245 case 1: 3246 lane = 0; 3247 break; 3248 case 2: 3249 lane = 1; 3250 break; 3251 case 4: 3252 lane = 2; 3253 break; 3254 default: 3255 return -1; 3256 } 3257 3258 /* configure XG MAC mode */ 3259 val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG); 3260 val &= ~MVPP22_XPCS_PCSMODE_MASK; 3261 val &= ~MVPP22_XPCS_LANEACTIVE_MASK; 3262 val |= (2 * lane) << MVPP22_XPCS_LANEACTIVE_OFFS; 3263 writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG); 3264 3265 return 0; 3266 } 3267 3268 static int gop_mpcs_mode(struct mvpp2_port *port) 3269 { 3270 u32 val; 3271 3272 /* configure PCS40G COMMON CONTROL */ 3273 val = readl(port->priv->mpcs_base + PCS40G_COMMON_CONTROL); 3274 val &= ~FORWARD_ERROR_CORRECTION_MASK; 3275 writel(val, port->priv->mpcs_base + PCS40G_COMMON_CONTROL); 3276 3277 /* configure PCS CLOCK RESET */ 3278 val = readl(port->priv->mpcs_base + PCS_CLOCK_RESET); 3279 val &= ~CLK_DIVISION_RATIO_MASK; 3280 val |= 1 << CLK_DIVISION_RATIO_OFFS; 3281 writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET); 3282 3283 val &= ~CLK_DIV_PHASE_SET_MASK; 3284 val |= MAC_CLK_RESET_MASK; 3285 val |= RX_SD_CLK_RESET_MASK; 3286 val |= TX_SD_CLK_RESET_MASK; 3287 writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET); 3288 3289 return 0; 3290 } 3291 3292 /* Set the internal mux's to the required MAC in the GOP */ 3293 static int gop_xlg_mac_mode_cfg(struct mvpp2_port *port, int num_of_act_lanes) 3294 { 3295 u32 val; 3296 3297 /* configure 10G MAC mode */ 3298 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 3299 val 
|= MVPP22_XLG_RX_FC_EN; 3300 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 3301 3302 val = readl(port->base + MVPP22_XLG_CTRL3_REG); 3303 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; 3304 val |= MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC; 3305 writel(val, port->base + MVPP22_XLG_CTRL3_REG); 3306 3307 /* read - modify - write */ 3308 val = readl(port->base + MVPP22_XLG_CTRL4_REG); 3309 val &= ~MVPP22_XLG_MODE_DMA_1G; 3310 val |= MVPP22_XLG_FORWARD_PFC_EN; 3311 val |= MVPP22_XLG_FORWARD_802_3X_FC_EN; 3312 val &= ~MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK; 3313 writel(val, port->base + MVPP22_XLG_CTRL4_REG); 3314 3315 /* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */ 3316 val = readl(port->base + MVPP22_XLG_CTRL1_REG); 3317 val &= ~MVPP22_XLG_MAX_RX_SIZE_MASK; 3318 val |= 0x1400 << MVPP22_XLG_MAX_RX_SIZE_OFFS; 3319 writel(val, port->base + MVPP22_XLG_CTRL1_REG); 3320 3321 /* unmask link change interrupt */ 3322 val = readl(port->base + MVPP22_XLG_INTERRUPT_MASK_REG); 3323 val |= MVPP22_XLG_INTERRUPT_LINK_CHANGE; 3324 val |= 1; /* unmask summary bit */ 3325 writel(val, port->base + MVPP22_XLG_INTERRUPT_MASK_REG); 3326 3327 return 0; 3328 } 3329 3330 /* Set PCS to reset or exit from reset */ 3331 static int gop_xpcs_reset(struct mvpp2_port *port, int reset) 3332 { 3333 u32 val; 3334 3335 /* read - modify - write */ 3336 val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG); 3337 if (reset) 3338 val &= ~MVPP22_XPCS_PCSRESET; 3339 else 3340 val |= MVPP22_XPCS_PCSRESET; 3341 writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG); 3342 3343 return 0; 3344 } 3345 3346 /* Set the MAC to reset or exit from reset */ 3347 static int gop_xlg_mac_reset(struct mvpp2_port *port, int reset) 3348 { 3349 u32 val; 3350 3351 /* read - modify - write */ 3352 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 3353 if (reset) 3354 val &= ~MVPP22_XLG_MAC_RESETN; 3355 else 3356 val |= MVPP22_XLG_MAC_RESETN; 3357 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 3358 3359 return 0; 
3360 } 3361 3362 /* 3363 * gop_port_init 3364 * 3365 * Init physical port. Configures the port mode and all it's elements 3366 * accordingly. 3367 * Does not verify that the selected mode/port number is valid at the 3368 * core level. 3369 */ 3370 static int gop_port_init(struct mvpp2_port *port) 3371 { 3372 int mac_num = port->gop_id; 3373 int num_of_act_lanes; 3374 3375 if (mac_num >= MVPP22_GOP_MAC_NUM) { 3376 netdev_err(NULL, "%s: illegal port number %d", __func__, 3377 mac_num); 3378 return -1; 3379 } 3380 3381 switch (port->phy_interface) { 3382 case PHY_INTERFACE_MODE_RGMII: 3383 case PHY_INTERFACE_MODE_RGMII_ID: 3384 gop_gmac_reset(port, 1); 3385 3386 /* configure PCS */ 3387 gop_gpcs_mode_cfg(port, 0); 3388 gop_bypass_clk_cfg(port, 1); 3389 3390 /* configure MAC */ 3391 gop_gmac_mode_cfg(port); 3392 /* pcs unreset */ 3393 gop_gpcs_reset(port, 0); 3394 3395 /* mac unreset */ 3396 gop_gmac_reset(port, 0); 3397 break; 3398 3399 case PHY_INTERFACE_MODE_SGMII: 3400 /* configure PCS */ 3401 gop_gpcs_mode_cfg(port, 1); 3402 3403 /* configure MAC */ 3404 gop_gmac_mode_cfg(port); 3405 /* select proper Mac mode */ 3406 gop_xlg_2_gig_mac_cfg(port); 3407 3408 /* pcs unreset */ 3409 gop_gpcs_reset(port, 0); 3410 /* mac unreset */ 3411 gop_gmac_reset(port, 0); 3412 break; 3413 3414 case PHY_INTERFACE_MODE_SFI: 3415 num_of_act_lanes = 2; 3416 mac_num = 0; 3417 /* configure PCS */ 3418 gop_xpcs_mode(port, num_of_act_lanes); 3419 gop_mpcs_mode(port); 3420 /* configure MAC */ 3421 gop_xlg_mac_mode_cfg(port, num_of_act_lanes); 3422 3423 /* pcs unreset */ 3424 gop_xpcs_reset(port, 0); 3425 3426 /* mac unreset */ 3427 gop_xlg_mac_reset(port, 0); 3428 break; 3429 3430 default: 3431 netdev_err(NULL, "%s: Requested port mode (%d) not supported\n", 3432 __func__, port->phy_interface); 3433 return -1; 3434 } 3435 3436 return 0; 3437 } 3438 3439 static void gop_xlg_mac_port_enable(struct mvpp2_port *port, int enable) 3440 { 3441 u32 val; 3442 3443 val = readl(port->base + 
MVPP22_XLG_CTRL0_REG); 3444 if (enable) { 3445 /* Enable port and MIB counters update */ 3446 val |= MVPP22_XLG_PORT_EN; 3447 val &= ~MVPP22_XLG_MIBCNT_DIS; 3448 } else { 3449 /* Disable port */ 3450 val &= ~MVPP22_XLG_PORT_EN; 3451 } 3452 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 3453 } 3454 3455 static void gop_port_enable(struct mvpp2_port *port, int enable) 3456 { 3457 switch (port->phy_interface) { 3458 case PHY_INTERFACE_MODE_RGMII: 3459 case PHY_INTERFACE_MODE_RGMII_ID: 3460 case PHY_INTERFACE_MODE_SGMII: 3461 if (enable) 3462 mvpp2_port_enable(port); 3463 else 3464 mvpp2_port_disable(port); 3465 break; 3466 3467 case PHY_INTERFACE_MODE_SFI: 3468 gop_xlg_mac_port_enable(port, enable); 3469 3470 break; 3471 default: 3472 netdev_err(NULL, "%s: Wrong port mode (%d)\n", __func__, 3473 port->phy_interface); 3474 return; 3475 } 3476 } 3477 3478 /* RFU1 functions */ 3479 static inline u32 gop_rfu1_read(struct mvpp2 *priv, u32 offset) 3480 { 3481 return readl(priv->rfu1_base + offset); 3482 } 3483 3484 static inline void gop_rfu1_write(struct mvpp2 *priv, u32 offset, u32 data) 3485 { 3486 writel(data, priv->rfu1_base + offset); 3487 } 3488 3489 static u32 mvpp2_netc_cfg_create(int gop_id, phy_interface_t phy_type) 3490 { 3491 u32 val = 0; 3492 3493 if (gop_id == 2) { 3494 if (phy_type == PHY_INTERFACE_MODE_SGMII) 3495 val |= MV_NETC_GE_MAC2_SGMII; 3496 } 3497 3498 if (gop_id == 3) { 3499 if (phy_type == PHY_INTERFACE_MODE_SGMII) 3500 val |= MV_NETC_GE_MAC3_SGMII; 3501 else if (phy_type == PHY_INTERFACE_MODE_RGMII || 3502 phy_type == PHY_INTERFACE_MODE_RGMII_ID) 3503 val |= MV_NETC_GE_MAC3_RGMII; 3504 } 3505 3506 return val; 3507 } 3508 3509 static void gop_netc_active_port(struct mvpp2 *priv, int gop_id, u32 val) 3510 { 3511 u32 reg; 3512 3513 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG); 3514 reg &= ~(NETC_PORTS_ACTIVE_MASK(gop_id)); 3515 3516 val <<= NETC_PORTS_ACTIVE_OFFSET(gop_id); 3517 val &= NETC_PORTS_ACTIVE_MASK(gop_id); 3518 3519 reg |= val; 
3520 3521 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg); 3522 } 3523 3524 static void gop_netc_mii_mode(struct mvpp2 *priv, int gop_id, u32 val) 3525 { 3526 u32 reg; 3527 3528 reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG); 3529 reg &= ~NETC_GBE_PORT1_MII_MODE_MASK; 3530 3531 val <<= NETC_GBE_PORT1_MII_MODE_OFFS; 3532 val &= NETC_GBE_PORT1_MII_MODE_MASK; 3533 3534 reg |= val; 3535 3536 gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg); 3537 } 3538 3539 static void gop_netc_gop_reset(struct mvpp2 *priv, u32 val) 3540 { 3541 u32 reg; 3542 3543 reg = gop_rfu1_read(priv, GOP_SOFT_RESET_1_REG); 3544 reg &= ~NETC_GOP_SOFT_RESET_MASK; 3545 3546 val <<= NETC_GOP_SOFT_RESET_OFFS; 3547 val &= NETC_GOP_SOFT_RESET_MASK; 3548 3549 reg |= val; 3550 3551 gop_rfu1_write(priv, GOP_SOFT_RESET_1_REG, reg); 3552 } 3553 3554 static void gop_netc_gop_clock_logic_set(struct mvpp2 *priv, u32 val) 3555 { 3556 u32 reg; 3557 3558 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG); 3559 reg &= ~NETC_CLK_DIV_PHASE_MASK; 3560 3561 val <<= NETC_CLK_DIV_PHASE_OFFS; 3562 val &= NETC_CLK_DIV_PHASE_MASK; 3563 3564 reg |= val; 3565 3566 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg); 3567 } 3568 3569 static void gop_netc_port_rf_reset(struct mvpp2 *priv, int gop_id, u32 val) 3570 { 3571 u32 reg; 3572 3573 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG); 3574 reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(gop_id)); 3575 3576 val <<= NETC_PORT_GIG_RF_RESET_OFFS(gop_id); 3577 val &= NETC_PORT_GIG_RF_RESET_MASK(gop_id); 3578 3579 reg |= val; 3580 3581 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg); 3582 } 3583 3584 static void gop_netc_gbe_sgmii_mode_select(struct mvpp2 *priv, int gop_id, 3585 u32 val) 3586 { 3587 u32 reg, mask, offset; 3588 3589 if (gop_id == 2) { 3590 mask = NETC_GBE_PORT0_SGMII_MODE_MASK; 3591 offset = NETC_GBE_PORT0_SGMII_MODE_OFFS; 3592 } else { 3593 mask = NETC_GBE_PORT1_SGMII_MODE_MASK; 3594 offset = NETC_GBE_PORT1_SGMII_MODE_OFFS; 3595 } 3596 reg = 
gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG); 3597 reg &= ~mask; 3598 3599 val <<= offset; 3600 val &= mask; 3601 3602 reg |= val; 3603 3604 gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg); 3605 } 3606 3607 static void gop_netc_bus_width_select(struct mvpp2 *priv, u32 val) 3608 { 3609 u32 reg; 3610 3611 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG); 3612 reg &= ~NETC_BUS_WIDTH_SELECT_MASK; 3613 3614 val <<= NETC_BUS_WIDTH_SELECT_OFFS; 3615 val &= NETC_BUS_WIDTH_SELECT_MASK; 3616 3617 reg |= val; 3618 3619 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg); 3620 } 3621 3622 static void gop_netc_sample_stages_timing(struct mvpp2 *priv, u32 val) 3623 { 3624 u32 reg; 3625 3626 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG); 3627 reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK; 3628 3629 val <<= NETC_GIG_RX_DATA_SAMPLE_OFFS; 3630 val &= NETC_GIG_RX_DATA_SAMPLE_MASK; 3631 3632 reg |= val; 3633 3634 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg); 3635 } 3636 3637 static void gop_netc_mac_to_xgmii(struct mvpp2 *priv, int gop_id, 3638 enum mv_netc_phase phase) 3639 { 3640 switch (phase) { 3641 case MV_NETC_FIRST_PHASE: 3642 /* Set Bus Width to HB mode = 1 */ 3643 gop_netc_bus_width_select(priv, 1); 3644 /* Select RGMII mode */ 3645 gop_netc_gbe_sgmii_mode_select(priv, gop_id, MV_NETC_GBE_XMII); 3646 break; 3647 3648 case MV_NETC_SECOND_PHASE: 3649 /* De-assert the relevant port HB reset */ 3650 gop_netc_port_rf_reset(priv, gop_id, 1); 3651 break; 3652 } 3653 } 3654 3655 static void gop_netc_mac_to_sgmii(struct mvpp2 *priv, int gop_id, 3656 enum mv_netc_phase phase) 3657 { 3658 switch (phase) { 3659 case MV_NETC_FIRST_PHASE: 3660 /* Set Bus Width to HB mode = 1 */ 3661 gop_netc_bus_width_select(priv, 1); 3662 /* Select SGMII mode */ 3663 if (gop_id >= 1) { 3664 gop_netc_gbe_sgmii_mode_select(priv, gop_id, 3665 MV_NETC_GBE_SGMII); 3666 } 3667 3668 /* Configure the sample stages */ 3669 gop_netc_sample_stages_timing(priv, 0); 3670 /* Configure the ComPhy 
Selector */ 3671 /* gop_netc_com_phy_selector_config(netComplex); */ 3672 break; 3673 3674 case MV_NETC_SECOND_PHASE: 3675 /* De-assert the relevant port HB reset */ 3676 gop_netc_port_rf_reset(priv, gop_id, 1); 3677 break; 3678 } 3679 } 3680 3681 static int gop_netc_init(struct mvpp2 *priv, enum mv_netc_phase phase) 3682 { 3683 u32 c = priv->netc_config; 3684 3685 if (c & MV_NETC_GE_MAC2_SGMII) 3686 gop_netc_mac_to_sgmii(priv, 2, phase); 3687 else 3688 gop_netc_mac_to_xgmii(priv, 2, phase); 3689 3690 if (c & MV_NETC_GE_MAC3_SGMII) { 3691 gop_netc_mac_to_sgmii(priv, 3, phase); 3692 } else { 3693 gop_netc_mac_to_xgmii(priv, 3, phase); 3694 if (c & MV_NETC_GE_MAC3_RGMII) 3695 gop_netc_mii_mode(priv, 3, MV_NETC_GBE_RGMII); 3696 else 3697 gop_netc_mii_mode(priv, 3, MV_NETC_GBE_MII); 3698 } 3699 3700 /* Activate gop ports 0, 2, 3 */ 3701 gop_netc_active_port(priv, 0, 1); 3702 gop_netc_active_port(priv, 2, 1); 3703 gop_netc_active_port(priv, 3, 1); 3704 3705 if (phase == MV_NETC_SECOND_PHASE) { 3706 /* Enable the GOP internal clock logic */ 3707 gop_netc_gop_clock_logic_set(priv, 1); 3708 /* De-assert GOP unit reset */ 3709 gop_netc_gop_reset(priv, 1); 3710 } 3711 3712 return 0; 3713 } 3714 3715 /* Set defaults to the MVPP2 port */ 3716 static void mvpp2_defaults_set(struct mvpp2_port *port) 3717 { 3718 int tx_port_num, val, queue, ptxq, lrxq; 3719 3720 if (port->priv->hw_version == MVPP21) { 3721 /* Configure port to loopback if needed */ 3722 if (port->flags & MVPP2_F_LOOPBACK) 3723 mvpp2_port_loopback_set(port); 3724 3725 /* Update TX FIFO MIN Threshold */ 3726 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 3727 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; 3728 /* Min. 
TX threshold must be less than minimal packet length */ 3729 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); 3730 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 3731 } 3732 3733 /* Disable Legacy WRR, Disable EJP, Release from reset */ 3734 tx_port_num = mvpp2_egress_port(port); 3735 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, 3736 tx_port_num); 3737 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); 3738 3739 /* Close bandwidth for all queues */ 3740 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { 3741 ptxq = mvpp2_txq_phys(port->id, queue); 3742 mvpp2_write(port->priv, 3743 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0); 3744 } 3745 3746 /* Set refill period to 1 usec, refill tokens 3747 * and bucket size to maximum 3748 */ 3749 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8); 3750 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); 3751 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; 3752 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); 3753 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; 3754 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); 3755 val = MVPP2_TXP_TOKEN_SIZE_MAX; 3756 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); 3757 3758 /* Set MaximumLowLatencyPacketSize value to 256 */ 3759 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), 3760 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | 3761 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); 3762 3763 /* Enable Rx cache snoop */ 3764 for (lrxq = 0; lrxq < rxq_number; lrxq++) { 3765 queue = port->rxqs[lrxq]->id; 3766 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 3767 val |= MVPP2_SNOOP_PKT_SIZE_MASK | 3768 MVPP2_SNOOP_BUF_HDR_MASK; 3769 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 3770 } 3771 } 3772 3773 /* Enable/disable receiving packets */ 3774 static void mvpp2_ingress_enable(struct mvpp2_port *port) 3775 { 3776 u32 val; 3777 int lrxq, queue; 3778 3779 for (lrxq = 0; lrxq < rxq_number; lrxq++) { 3780 queue = port->rxqs[lrxq]->id; 3781 val = 
mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 3782 val &= ~MVPP2_RXQ_DISABLE_MASK; 3783 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 3784 } 3785 } 3786 3787 static void mvpp2_ingress_disable(struct mvpp2_port *port) 3788 { 3789 u32 val; 3790 int lrxq, queue; 3791 3792 for (lrxq = 0; lrxq < rxq_number; lrxq++) { 3793 queue = port->rxqs[lrxq]->id; 3794 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 3795 val |= MVPP2_RXQ_DISABLE_MASK; 3796 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 3797 } 3798 } 3799 3800 /* Enable transmit via physical egress queue 3801 * - HW starts take descriptors from DRAM 3802 */ 3803 static void mvpp2_egress_enable(struct mvpp2_port *port) 3804 { 3805 u32 qmap; 3806 int queue; 3807 int tx_port_num = mvpp2_egress_port(port); 3808 3809 /* Enable all initialized TXs. */ 3810 qmap = 0; 3811 for (queue = 0; queue < txq_number; queue++) { 3812 struct mvpp2_tx_queue *txq = port->txqs[queue]; 3813 3814 if (txq->descs != NULL) 3815 qmap |= (1 << queue); 3816 } 3817 3818 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 3819 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); 3820 } 3821 3822 /* Disable transmit via physical egress queue 3823 * - HW doesn't take descriptors from DRAM 3824 */ 3825 static void mvpp2_egress_disable(struct mvpp2_port *port) 3826 { 3827 u32 reg_data; 3828 int delay; 3829 int tx_port_num = mvpp2_egress_port(port); 3830 3831 /* Issue stop command for active channels only */ 3832 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 3833 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & 3834 MVPP2_TXP_SCHED_ENQ_MASK; 3835 if (reg_data != 0) 3836 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, 3837 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); 3838 3839 /* Wait for all Tx activity to terminate. 
*/ 3840 delay = 0; 3841 do { 3842 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { 3843 netdev_warn(port->dev, 3844 "Tx stop timed out, status=0x%08x\n", 3845 reg_data); 3846 break; 3847 } 3848 mdelay(1); 3849 delay++; 3850 3851 /* Check port TX Command register that all 3852 * Tx queues are stopped 3853 */ 3854 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); 3855 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); 3856 } 3857 3858 /* Rx descriptors helper methods */ 3859 3860 /* Get number of Rx descriptors occupied by received packets */ 3861 static inline int 3862 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) 3863 { 3864 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); 3865 3866 return val & MVPP2_RXQ_OCCUPIED_MASK; 3867 } 3868 3869 /* Update Rx queue status with the number of occupied and available 3870 * Rx descriptor slots. 3871 */ 3872 static inline void 3873 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, 3874 int used_count, int free_count) 3875 { 3876 /* Decrement the number of used descriptors and increment count 3877 * increment the number of free descriptors. 
3878 */ 3879 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); 3880 3881 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); 3882 } 3883 3884 /* Get pointer to next RX descriptor to be processed by SW */ 3885 static inline struct mvpp2_rx_desc * 3886 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) 3887 { 3888 int rx_desc = rxq->next_desc_to_proc; 3889 3890 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); 3891 prefetch(rxq->descs + rxq->next_desc_to_proc); 3892 return rxq->descs + rx_desc; 3893 } 3894 3895 /* Set rx queue offset */ 3896 static void mvpp2_rxq_offset_set(struct mvpp2_port *port, 3897 int prxq, int offset) 3898 { 3899 u32 val; 3900 3901 /* Convert offset from bytes to units of 32 bytes */ 3902 offset = offset >> 5; 3903 3904 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 3905 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; 3906 3907 /* Offset is in */ 3908 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & 3909 MVPP2_RXQ_PACKET_OFFSET_MASK); 3910 3911 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 3912 } 3913 3914 /* Obtain BM cookie information from descriptor */ 3915 static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port, 3916 struct mvpp2_rx_desc *rx_desc) 3917 { 3918 int cpu = smp_processor_id(); 3919 int pool; 3920 3921 pool = (mvpp2_rxdesc_status_get(port, rx_desc) & 3922 MVPP2_RXD_BM_POOL_ID_MASK) >> 3923 MVPP2_RXD_BM_POOL_ID_OFFS; 3924 3925 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | 3926 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS); 3927 } 3928 3929 /* Tx descriptors helper methods */ 3930 3931 /* Get number of Tx descriptors waiting to be transmitted by HW */ 3932 static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port, 3933 struct mvpp2_tx_queue *txq) 3934 { 3935 u32 val; 3936 3937 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 3938 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG); 3939 3940 return val & MVPP2_TXQ_PENDING_MASK; 3941 } 3942 3943 /* Get 
pointer to next Tx descriptor to be processed (send) by HW */ 3944 static struct mvpp2_tx_desc * 3945 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) 3946 { 3947 int tx_desc = txq->next_desc_to_proc; 3948 3949 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); 3950 return txq->descs + tx_desc; 3951 } 3952 3953 /* Update HW with number of aggregated Tx descriptors to be sent */ 3954 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) 3955 { 3956 /* aggregated access - relevant TXQ number is written in TX desc */ 3957 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending); 3958 } 3959 3960 /* Get number of sent descriptors and decrement counter. 3961 * The number of sent descriptors is returned. 3962 * Per-CPU access 3963 */ 3964 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, 3965 struct mvpp2_tx_queue *txq) 3966 { 3967 u32 val; 3968 3969 /* Reading status reg resets transmitted descriptor counter */ 3970 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id)); 3971 3972 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> 3973 MVPP2_TRANSMITTED_COUNT_OFFSET; 3974 } 3975 3976 static void mvpp2_txq_sent_counter_clear(void *arg) 3977 { 3978 struct mvpp2_port *port = arg; 3979 int queue; 3980 3981 for (queue = 0; queue < txq_number; queue++) { 3982 int id = port->txqs[queue]->id; 3983 3984 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id)); 3985 } 3986 } 3987 3988 /* Set max sizes for Tx queues */ 3989 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) 3990 { 3991 u32 val, size, mtu; 3992 int txq, tx_port_num; 3993 3994 mtu = port->pkt_size * 8; 3995 if (mtu > MVPP2_TXP_MTU_MAX) 3996 mtu = MVPP2_TXP_MTU_MAX; 3997 3998 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ 3999 mtu = 3 * mtu; 4000 4001 /* Indirect access to registers */ 4002 tx_port_num = mvpp2_egress_port(port); 4003 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 4004 4005 /* Set MTU */ 4006 
val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); 4007 val &= ~MVPP2_TXP_MTU_MAX; 4008 val |= mtu; 4009 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); 4010 4011 /* TXP token size and all TXQs token size must be larger that MTU */ 4012 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); 4013 size = val & MVPP2_TXP_TOKEN_SIZE_MAX; 4014 if (size < mtu) { 4015 size = mtu; 4016 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; 4017 val |= size; 4018 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); 4019 } 4020 4021 for (txq = 0; txq < txq_number; txq++) { 4022 val = mvpp2_read(port->priv, 4023 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); 4024 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; 4025 4026 if (size < mtu) { 4027 size = mtu; 4028 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; 4029 val |= size; 4030 mvpp2_write(port->priv, 4031 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), 4032 val); 4033 } 4034 } 4035 } 4036 4037 /* Free Tx queue skbuffs */ 4038 static void mvpp2_txq_bufs_free(struct mvpp2_port *port, 4039 struct mvpp2_tx_queue *txq, 4040 struct mvpp2_txq_pcpu *txq_pcpu, int num) 4041 { 4042 int i; 4043 4044 for (i = 0; i < num; i++) 4045 mvpp2_txq_inc_get(txq_pcpu); 4046 } 4047 4048 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, 4049 u32 cause) 4050 { 4051 int queue = fls(cause) - 1; 4052 4053 return port->rxqs[queue]; 4054 } 4055 4056 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, 4057 u32 cause) 4058 { 4059 int queue = fls(cause) - 1; 4060 4061 return port->txqs[queue]; 4062 } 4063 4064 /* Rx/Tx queue initialization/cleanup methods */ 4065 4066 /* Allocate and initialize descriptors for aggr TXQ */ 4067 static int mvpp2_aggr_txq_init(struct udevice *dev, 4068 struct mvpp2_tx_queue *aggr_txq, 4069 int desc_num, int cpu, 4070 struct mvpp2 *priv) 4071 { 4072 u32 txq_dma; 4073 4074 /* Allocate memory for TX descriptors */ 4075 aggr_txq->descs = buffer_loc.aggr_tx_descs; 4076 aggr_txq->descs_dma = 
(dma_addr_t)buffer_loc.aggr_tx_descs; 4077 if (!aggr_txq->descs) 4078 return -ENOMEM; 4079 4080 /* Make sure descriptor address is cache line size aligned */ 4081 BUG_ON(aggr_txq->descs != 4082 PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); 4083 4084 aggr_txq->last_desc = aggr_txq->size - 1; 4085 4086 /* Aggr TXQ no reset WA */ 4087 aggr_txq->next_desc_to_proc = mvpp2_read(priv, 4088 MVPP2_AGGR_TXQ_INDEX_REG(cpu)); 4089 4090 /* Set Tx descriptors queue starting address indirect 4091 * access 4092 */ 4093 if (priv->hw_version == MVPP21) 4094 txq_dma = aggr_txq->descs_dma; 4095 else 4096 txq_dma = aggr_txq->descs_dma >> 4097 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; 4098 4099 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); 4100 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num); 4101 4102 return 0; 4103 } 4104 4105 /* Create a specified Rx queue */ 4106 static int mvpp2_rxq_init(struct mvpp2_port *port, 4107 struct mvpp2_rx_queue *rxq) 4108 4109 { 4110 u32 rxq_dma; 4111 4112 rxq->size = port->rx_ring_size; 4113 4114 /* Allocate memory for RX descriptors */ 4115 rxq->descs = buffer_loc.rx_descs; 4116 rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs; 4117 if (!rxq->descs) 4118 return -ENOMEM; 4119 4120 BUG_ON(rxq->descs != 4121 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); 4122 4123 rxq->last_desc = rxq->size - 1; 4124 4125 /* Zero occupied and non-occupied counters - direct access */ 4126 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 4127 4128 /* Set Rx descriptors queue starting address - indirect access */ 4129 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); 4130 if (port->priv->hw_version == MVPP21) 4131 rxq_dma = rxq->descs_dma; 4132 else 4133 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; 4134 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); 4135 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); 4136 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0); 4137 4138 /* Set Offset */ 4139 
mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); 4140 4141 /* Add number of descriptors ready for receiving packets */ 4142 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); 4143 4144 return 0; 4145 } 4146 4147 /* Push packets received by the RXQ to BM pool */ 4148 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, 4149 struct mvpp2_rx_queue *rxq) 4150 { 4151 int rx_received, i; 4152 4153 rx_received = mvpp2_rxq_received(port, rxq->id); 4154 if (!rx_received) 4155 return; 4156 4157 for (i = 0; i < rx_received; i++) { 4158 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 4159 u32 bm = mvpp2_bm_cookie_build(port, rx_desc); 4160 4161 mvpp2_pool_refill(port, bm, 4162 mvpp2_rxdesc_dma_addr_get(port, rx_desc), 4163 mvpp2_rxdesc_cookie_get(port, rx_desc)); 4164 } 4165 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); 4166 } 4167 4168 /* Cleanup Rx queue */ 4169 static void mvpp2_rxq_deinit(struct mvpp2_port *port, 4170 struct mvpp2_rx_queue *rxq) 4171 { 4172 mvpp2_rxq_drop_pkts(port, rxq); 4173 4174 rxq->descs = NULL; 4175 rxq->last_desc = 0; 4176 rxq->next_desc_to_proc = 0; 4177 rxq->descs_dma = 0; 4178 4179 /* Clear Rx descriptors queue starting address and size; 4180 * free descriptor number 4181 */ 4182 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 4183 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); 4184 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0); 4185 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0); 4186 } 4187 4188 /* Create and initialize a Tx queue */ 4189 static int mvpp2_txq_init(struct mvpp2_port *port, 4190 struct mvpp2_tx_queue *txq) 4191 { 4192 u32 val; 4193 int cpu, desc, desc_per_txq, tx_port_num; 4194 struct mvpp2_txq_pcpu *txq_pcpu; 4195 4196 txq->size = port->tx_ring_size; 4197 4198 /* Allocate memory for Tx descriptors */ 4199 txq->descs = buffer_loc.tx_descs; 4200 txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs; 4201 if (!txq->descs) 4202 return -ENOMEM; 4203 4204 /* Make sure 
descriptor address is cache line size aligned */ 4205 BUG_ON(txq->descs != 4206 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); 4207 4208 txq->last_desc = txq->size - 1; 4209 4210 /* Set Tx descriptors queue starting address - indirect access */ 4211 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 4212 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma); 4213 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size & 4214 MVPP2_TXQ_DESC_SIZE_MASK); 4215 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0); 4216 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG, 4217 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); 4218 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG); 4219 val &= ~MVPP2_TXQ_PENDING_MASK; 4220 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val); 4221 4222 /* Calculate base address in prefetch buffer. We reserve 16 descriptors 4223 * for each existing TXQ. 4224 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT 4225 * GBE ports assumed to be continious from 0 to MVPP2_MAX_PORTS 4226 */ 4227 desc_per_txq = 16; 4228 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + 4229 (txq->log_id * desc_per_txq); 4230 4231 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, 4232 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | 4233 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); 4234 4235 /* WRR / EJP configuration - indirect access */ 4236 tx_port_num = mvpp2_egress_port(port); 4237 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 4238 4239 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); 4240 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; 4241 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); 4242 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; 4243 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); 4244 4245 val = MVPP2_TXQ_TOKEN_SIZE_MAX; 4246 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), 4247 val); 4248 4249 for_each_present_cpu(cpu) { 4250 txq_pcpu = per_cpu_ptr(txq->pcpu, 
cpu); 4251 txq_pcpu->size = txq->size; 4252 } 4253 4254 return 0; 4255 } 4256 4257 /* Free allocated TXQ resources */ 4258 static void mvpp2_txq_deinit(struct mvpp2_port *port, 4259 struct mvpp2_tx_queue *txq) 4260 { 4261 txq->descs = NULL; 4262 txq->last_desc = 0; 4263 txq->next_desc_to_proc = 0; 4264 txq->descs_dma = 0; 4265 4266 /* Set minimum bandwidth for disabled TXQs */ 4267 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); 4268 4269 /* Set Tx descriptors queue starting address and size */ 4270 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 4271 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0); 4272 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0); 4273 } 4274 4275 /* Cleanup Tx ports */ 4276 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) 4277 { 4278 struct mvpp2_txq_pcpu *txq_pcpu; 4279 int delay, pending, cpu; 4280 u32 val; 4281 4282 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 4283 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG); 4284 val |= MVPP2_TXQ_DRAIN_EN_MASK; 4285 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); 4286 4287 /* The napi queue has been stopped so wait for all packets 4288 * to be transmitted. 
4289 */ 4290 delay = 0; 4291 do { 4292 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { 4293 netdev_warn(port->dev, 4294 "port %d: cleaning queue %d timed out\n", 4295 port->id, txq->log_id); 4296 break; 4297 } 4298 mdelay(1); 4299 delay++; 4300 4301 pending = mvpp2_txq_pend_desc_num_get(port, txq); 4302 } while (pending); 4303 4304 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 4305 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); 4306 4307 for_each_present_cpu(cpu) { 4308 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 4309 4310 /* Release all packets */ 4311 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); 4312 4313 /* Reset queue */ 4314 txq_pcpu->count = 0; 4315 txq_pcpu->txq_put_index = 0; 4316 txq_pcpu->txq_get_index = 0; 4317 } 4318 } 4319 4320 /* Cleanup all Tx queues */ 4321 static void mvpp2_cleanup_txqs(struct mvpp2_port *port) 4322 { 4323 struct mvpp2_tx_queue *txq; 4324 int queue; 4325 u32 val; 4326 4327 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); 4328 4329 /* Reset Tx ports and delete Tx queues */ 4330 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); 4331 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 4332 4333 for (queue = 0; queue < txq_number; queue++) { 4334 txq = port->txqs[queue]; 4335 mvpp2_txq_clean(port, txq); 4336 mvpp2_txq_deinit(port, txq); 4337 } 4338 4339 mvpp2_txq_sent_counter_clear(port); 4340 4341 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); 4342 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 4343 } 4344 4345 /* Cleanup all Rx queues */ 4346 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) 4347 { 4348 int queue; 4349 4350 for (queue = 0; queue < rxq_number; queue++) 4351 mvpp2_rxq_deinit(port, port->rxqs[queue]); 4352 } 4353 4354 /* Init all Rx queues for port */ 4355 static int mvpp2_setup_rxqs(struct mvpp2_port *port) 4356 { 4357 int queue, err; 4358 4359 for (queue = 0; queue < rxq_number; queue++) { 4360 err = mvpp2_rxq_init(port, port->rxqs[queue]); 4361 if (err) 4362 goto err_cleanup; 4363 } 4364 return 0; 4365 
4366 err_cleanup: 4367 mvpp2_cleanup_rxqs(port); 4368 return err; 4369 } 4370 4371 /* Init all tx queues for port */ 4372 static int mvpp2_setup_txqs(struct mvpp2_port *port) 4373 { 4374 struct mvpp2_tx_queue *txq; 4375 int queue, err; 4376 4377 for (queue = 0; queue < txq_number; queue++) { 4378 txq = port->txqs[queue]; 4379 err = mvpp2_txq_init(port, txq); 4380 if (err) 4381 goto err_cleanup; 4382 } 4383 4384 mvpp2_txq_sent_counter_clear(port); 4385 return 0; 4386 4387 err_cleanup: 4388 mvpp2_cleanup_txqs(port); 4389 return err; 4390 } 4391 4392 /* Adjust link */ 4393 static void mvpp2_link_event(struct mvpp2_port *port) 4394 { 4395 struct phy_device *phydev = port->phy_dev; 4396 int status_change = 0; 4397 u32 val; 4398 4399 if (phydev->link) { 4400 if ((port->speed != phydev->speed) || 4401 (port->duplex != phydev->duplex)) { 4402 u32 val; 4403 4404 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4405 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | 4406 MVPP2_GMAC_CONFIG_GMII_SPEED | 4407 MVPP2_GMAC_CONFIG_FULL_DUPLEX | 4408 MVPP2_GMAC_AN_SPEED_EN | 4409 MVPP2_GMAC_AN_DUPLEX_EN); 4410 4411 if (phydev->duplex) 4412 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; 4413 4414 if (phydev->speed == SPEED_1000) 4415 val |= MVPP2_GMAC_CONFIG_GMII_SPEED; 4416 else if (phydev->speed == SPEED_100) 4417 val |= MVPP2_GMAC_CONFIG_MII_SPEED; 4418 4419 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4420 4421 port->duplex = phydev->duplex; 4422 port->speed = phydev->speed; 4423 } 4424 } 4425 4426 if (phydev->link != port->link) { 4427 if (!phydev->link) { 4428 port->duplex = -1; 4429 port->speed = 0; 4430 } 4431 4432 port->link = phydev->link; 4433 status_change = 1; 4434 } 4435 4436 if (status_change) { 4437 if (phydev->link) { 4438 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4439 val |= (MVPP2_GMAC_FORCE_LINK_PASS | 4440 MVPP2_GMAC_FORCE_LINK_DOWN); 4441 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4442 mvpp2_egress_enable(port); 4443 mvpp2_ingress_enable(port); 4444 
} else { 4445 mvpp2_ingress_disable(port); 4446 mvpp2_egress_disable(port); 4447 } 4448 } 4449 } 4450 4451 /* Main RX/TX processing routines */ 4452 4453 /* Display more error info */ 4454 static void mvpp2_rx_error(struct mvpp2_port *port, 4455 struct mvpp2_rx_desc *rx_desc) 4456 { 4457 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 4458 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); 4459 4460 switch (status & MVPP2_RXD_ERR_CODE_MASK) { 4461 case MVPP2_RXD_ERR_CRC: 4462 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n", 4463 status, sz); 4464 break; 4465 case MVPP2_RXD_ERR_OVERRUN: 4466 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n", 4467 status, sz); 4468 break; 4469 case MVPP2_RXD_ERR_RESOURCE: 4470 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n", 4471 status, sz); 4472 break; 4473 } 4474 } 4475 4476 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ 4477 static int mvpp2_rx_refill(struct mvpp2_port *port, 4478 struct mvpp2_bm_pool *bm_pool, 4479 u32 bm, dma_addr_t dma_addr) 4480 { 4481 mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr); 4482 return 0; 4483 } 4484 4485 /* Set hw internals when starting port */ 4486 static void mvpp2_start_dev(struct mvpp2_port *port) 4487 { 4488 switch (port->phy_interface) { 4489 case PHY_INTERFACE_MODE_RGMII: 4490 case PHY_INTERFACE_MODE_RGMII_ID: 4491 case PHY_INTERFACE_MODE_SGMII: 4492 mvpp2_gmac_max_rx_size_set(port); 4493 default: 4494 break; 4495 } 4496 4497 mvpp2_txp_max_tx_size_set(port); 4498 4499 if (port->priv->hw_version == MVPP21) 4500 mvpp2_port_enable(port); 4501 else 4502 gop_port_enable(port, 1); 4503 } 4504 4505 /* Set hw internals when stopping port */ 4506 static void mvpp2_stop_dev(struct mvpp2_port *port) 4507 { 4508 /* Stop new packets from arriving to RXQs */ 4509 mvpp2_ingress_disable(port); 4510 4511 mvpp2_egress_disable(port); 4512 4513 if (port->priv->hw_version == MVPP21) 4514 
mvpp2_port_disable(port); 4515 else 4516 gop_port_enable(port, 0); 4517 } 4518 4519 static int mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port) 4520 { 4521 struct phy_device *phy_dev; 4522 4523 if (!port->init || port->link == 0) { 4524 phy_dev = phy_connect(port->priv->bus, port->phyaddr, dev, 4525 port->phy_interface); 4526 port->phy_dev = phy_dev; 4527 if (!phy_dev) { 4528 netdev_err(port->dev, "cannot connect to phy\n"); 4529 return -ENODEV; 4530 } 4531 phy_dev->supported &= PHY_GBIT_FEATURES; 4532 phy_dev->advertising = phy_dev->supported; 4533 4534 port->phy_dev = phy_dev; 4535 port->link = 0; 4536 port->duplex = 0; 4537 port->speed = 0; 4538 4539 phy_config(phy_dev); 4540 phy_startup(phy_dev); 4541 if (!phy_dev->link) { 4542 printf("%s: No link\n", phy_dev->dev->name); 4543 return -1; 4544 } 4545 4546 port->init = 1; 4547 } else { 4548 mvpp2_egress_enable(port); 4549 mvpp2_ingress_enable(port); 4550 } 4551 4552 return 0; 4553 } 4554 4555 static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port) 4556 { 4557 unsigned char mac_bcast[ETH_ALEN] = { 4558 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 4559 int err; 4560 4561 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true); 4562 if (err) { 4563 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); 4564 return err; 4565 } 4566 err = mvpp2_prs_mac_da_accept(port->priv, port->id, 4567 port->dev_addr, true); 4568 if (err) { 4569 netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n"); 4570 return err; 4571 } 4572 err = mvpp2_prs_def_flow(port); 4573 if (err) { 4574 netdev_err(dev, "mvpp2_prs_def_flow failed\n"); 4575 return err; 4576 } 4577 4578 /* Allocate the Rx/Tx queues */ 4579 err = mvpp2_setup_rxqs(port); 4580 if (err) { 4581 netdev_err(port->dev, "cannot allocate Rx queues\n"); 4582 return err; 4583 } 4584 4585 err = mvpp2_setup_txqs(port); 4586 if (err) { 4587 netdev_err(port->dev, "cannot allocate Tx queues\n"); 4588 return err; 4589 } 4590 4591 if (port->phy_node) { 4592 
err = mvpp2_phy_connect(dev, port); 4593 if (err < 0) 4594 return err; 4595 4596 mvpp2_link_event(port); 4597 } else { 4598 mvpp2_egress_enable(port); 4599 mvpp2_ingress_enable(port); 4600 } 4601 4602 mvpp2_start_dev(port); 4603 4604 return 0; 4605 } 4606 4607 /* No Device ops here in U-Boot */ 4608 4609 /* Driver initialization */ 4610 4611 static void mvpp2_port_power_up(struct mvpp2_port *port) 4612 { 4613 struct mvpp2 *priv = port->priv; 4614 4615 /* On PPv2.2 the GoP / interface configuration has already been done */ 4616 if (priv->hw_version == MVPP21) 4617 mvpp2_port_mii_set(port); 4618 mvpp2_port_periodic_xon_disable(port); 4619 if (priv->hw_version == MVPP21) 4620 mvpp2_port_fc_adv_enable(port); 4621 mvpp2_port_reset(port); 4622 } 4623 4624 /* Initialize port HW */ 4625 static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port) 4626 { 4627 struct mvpp2 *priv = port->priv; 4628 struct mvpp2_txq_pcpu *txq_pcpu; 4629 int queue, cpu, err; 4630 4631 if (port->first_rxq + rxq_number > 4632 MVPP2_MAX_PORTS * priv->max_port_rxqs) 4633 return -EINVAL; 4634 4635 /* Disable port */ 4636 mvpp2_egress_disable(port); 4637 if (priv->hw_version == MVPP21) 4638 mvpp2_port_disable(port); 4639 else 4640 gop_port_enable(port, 0); 4641 4642 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs), 4643 GFP_KERNEL); 4644 if (!port->txqs) 4645 return -ENOMEM; 4646 4647 /* Associate physical Tx queues to this port and initialize. 4648 * The mapping is predefined. 
4649 */ 4650 for (queue = 0; queue < txq_number; queue++) { 4651 int queue_phy_id = mvpp2_txq_phys(port->id, queue); 4652 struct mvpp2_tx_queue *txq; 4653 4654 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); 4655 if (!txq) 4656 return -ENOMEM; 4657 4658 txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu), 4659 GFP_KERNEL); 4660 if (!txq->pcpu) 4661 return -ENOMEM; 4662 4663 txq->id = queue_phy_id; 4664 txq->log_id = queue; 4665 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; 4666 for_each_present_cpu(cpu) { 4667 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 4668 txq_pcpu->cpu = cpu; 4669 } 4670 4671 port->txqs[queue] = txq; 4672 } 4673 4674 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs), 4675 GFP_KERNEL); 4676 if (!port->rxqs) 4677 return -ENOMEM; 4678 4679 /* Allocate and initialize Rx queue for this port */ 4680 for (queue = 0; queue < rxq_number; queue++) { 4681 struct mvpp2_rx_queue *rxq; 4682 4683 /* Map physical Rx queue to port's logical Rx queue */ 4684 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); 4685 if (!rxq) 4686 return -ENOMEM; 4687 /* Map this Rx queue to a physical queue */ 4688 rxq->id = port->first_rxq + queue; 4689 rxq->port = port->id; 4690 rxq->logic_rxq = queue; 4691 4692 port->rxqs[queue] = rxq; 4693 } 4694 4695 /* Configure Rx queue group interrupt for this port */ 4696 if (priv->hw_version == MVPP21) { 4697 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), 4698 CONFIG_MV_ETH_RXQ); 4699 } else { 4700 u32 val; 4701 4702 val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET); 4703 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); 4704 4705 val = (CONFIG_MV_ETH_RXQ << 4706 MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET); 4707 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); 4708 } 4709 4710 /* Create Rx descriptor rings */ 4711 for (queue = 0; queue < rxq_number; queue++) { 4712 struct mvpp2_rx_queue *rxq = port->rxqs[queue]; 4713 4714 rxq->size = port->rx_ring_size; 4715 rxq->pkts_coal = 
MVPP2_RX_COAL_PKTS; 4716 rxq->time_coal = MVPP2_RX_COAL_USEC; 4717 } 4718 4719 mvpp2_ingress_disable(port); 4720 4721 /* Port default configuration */ 4722 mvpp2_defaults_set(port); 4723 4724 /* Port's classifier configuration */ 4725 mvpp2_cls_oversize_rxq_set(port); 4726 mvpp2_cls_port_config(port); 4727 4728 /* Provide an initial Rx packet size */ 4729 port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN); 4730 4731 /* Initialize pools for swf */ 4732 err = mvpp2_swf_bm_pool_init(port); 4733 if (err) 4734 return err; 4735 4736 return 0; 4737 } 4738 4739 static int phy_info_parse(struct udevice *dev, struct mvpp2_port *port) 4740 { 4741 int port_node = dev_of_offset(dev); 4742 const char *phy_mode_str; 4743 int phy_node, mdio_off, cp_node; 4744 u32 id; 4745 u32 phyaddr = 0; 4746 int phy_mode = -1; 4747 u64 mdio_addr; 4748 4749 phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy"); 4750 4751 if (phy_node > 0) { 4752 phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0); 4753 if (phyaddr < 0) { 4754 dev_err(&pdev->dev, "could not find phy address\n"); 4755 return -1; 4756 } 4757 mdio_off = fdt_parent_offset(gd->fdt_blob, phy_node); 4758 4759 /* TODO: This WA for mdio issue. U-boot 2017 don't have 4760 * mdio driver and on MACHIATOBin board ports from CP1 4761 * connected to mdio on CP0. 4762 * WA is to get mdio address from phy handler parent 4763 * base address. WA should be removed after 4764 * mdio driver implementation. 
4765 */ 4766 mdio_addr = fdtdec_get_uint(gd->fdt_blob, 4767 mdio_off, "reg", 0); 4768 4769 cp_node = fdt_parent_offset(gd->fdt_blob, mdio_off); 4770 mdio_addr |= fdt_get_base_address((void *)gd->fdt_blob, 4771 cp_node); 4772 4773 port->priv->mdio_base = (void *)mdio_addr; 4774 4775 if (port->priv->mdio_base < 0) { 4776 dev_err(&pdev->dev, "could not find mdio base address\n"); 4777 return -1; 4778 } 4779 } else { 4780 phy_node = 0; 4781 } 4782 4783 phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL); 4784 if (phy_mode_str) 4785 phy_mode = phy_get_interface_by_name(phy_mode_str); 4786 if (phy_mode == -1) { 4787 dev_err(&pdev->dev, "incorrect phy mode\n"); 4788 return -EINVAL; 4789 } 4790 4791 id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1); 4792 if (id == -1) { 4793 dev_err(&pdev->dev, "missing port-id value\n"); 4794 return -EINVAL; 4795 } 4796 4797 #ifdef CONFIG_DM_GPIO 4798 gpio_request_by_name(dev, "phy-reset-gpios", 0, 4799 &port->phy_reset_gpio, GPIOD_IS_OUT); 4800 gpio_request_by_name(dev, "marvell,sfp-tx-disable-gpio", 0, 4801 &port->phy_tx_disable_gpio, GPIOD_IS_OUT); 4802 #endif 4803 4804 /* 4805 * ToDo: 4806 * Not sure if this DT property "phy-speed" will get accepted, so 4807 * this might change later 4808 */ 4809 /* Get phy-speed for SGMII 2.5Gbps vs 1Gbps setup */ 4810 port->phy_speed = fdtdec_get_int(gd->fdt_blob, port_node, 4811 "phy-speed", 1000); 4812 4813 port->id = id; 4814 if (port->priv->hw_version == MVPP21) 4815 port->first_rxq = port->id * rxq_number; 4816 else 4817 port->first_rxq = port->id * port->priv->max_port_rxqs; 4818 port->phy_node = phy_node; 4819 port->phy_interface = phy_mode; 4820 port->phyaddr = phyaddr; 4821 4822 return 0; 4823 } 4824 4825 #ifdef CONFIG_DM_GPIO 4826 /* Port GPIO initialization */ 4827 static void mvpp2_gpio_init(struct mvpp2_port *port) 4828 { 4829 if (dm_gpio_is_valid(&port->phy_reset_gpio)) { 4830 dm_gpio_set_value(&port->phy_reset_gpio, 0); 4831 udelay(1000); 4832 
dm_gpio_set_value(&port->phy_reset_gpio, 1); 4833 } 4834 4835 if (dm_gpio_is_valid(&port->phy_tx_disable_gpio)) 4836 dm_gpio_set_value(&port->phy_tx_disable_gpio, 0); 4837 } 4838 #endif 4839 4840 /* Ports initialization */ 4841 static int mvpp2_port_probe(struct udevice *dev, 4842 struct mvpp2_port *port, 4843 int port_node, 4844 struct mvpp2 *priv) 4845 { 4846 int err; 4847 4848 port->tx_ring_size = MVPP2_MAX_TXD; 4849 port->rx_ring_size = MVPP2_MAX_RXD; 4850 4851 err = mvpp2_port_init(dev, port); 4852 if (err < 0) { 4853 dev_err(&pdev->dev, "failed to init port %d\n", port->id); 4854 return err; 4855 } 4856 mvpp2_port_power_up(port); 4857 4858 #ifdef CONFIG_DM_GPIO 4859 mvpp2_gpio_init(port); 4860 #endif 4861 4862 priv->port_list[port->id] = port; 4863 return 0; 4864 } 4865 4866 /* Initialize decoding windows */ 4867 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, 4868 struct mvpp2 *priv) 4869 { 4870 u32 win_enable; 4871 int i; 4872 4873 for (i = 0; i < 6; i++) { 4874 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); 4875 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); 4876 4877 if (i < 4) 4878 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); 4879 } 4880 4881 win_enable = 0; 4882 4883 for (i = 0; i < dram->num_cs; i++) { 4884 const struct mbus_dram_window *cs = dram->cs + i; 4885 4886 mvpp2_write(priv, MVPP2_WIN_BASE(i), 4887 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | 4888 dram->mbus_dram_target_id); 4889 4890 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 4891 (cs->size - 1) & 0xffff0000); 4892 4893 win_enable |= (1 << i); 4894 } 4895 4896 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); 4897 } 4898 4899 /* Initialize Rx FIFO's */ 4900 static void mvpp2_rx_fifo_init(struct mvpp2 *priv) 4901 { 4902 int port; 4903 4904 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 4905 if (priv->hw_version == MVPP22) { 4906 if (port == 0) { 4907 mvpp2_write(priv, 4908 MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4909 MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE); 4910 mvpp2_write(priv, 
4911 MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4912 MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE); 4913 } else if (port == 1) { 4914 mvpp2_write(priv, 4915 MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4916 MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE); 4917 mvpp2_write(priv, 4918 MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4919 MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE); 4920 } else { 4921 mvpp2_write(priv, 4922 MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4923 MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE); 4924 mvpp2_write(priv, 4925 MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4926 MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE); 4927 } 4928 } else { 4929 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4930 MVPP21_RX_FIFO_PORT_DATA_SIZE); 4931 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4932 MVPP21_RX_FIFO_PORT_ATTR_SIZE); 4933 } 4934 } 4935 4936 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, 4937 MVPP2_RX_FIFO_PORT_MIN_PKT); 4938 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); 4939 } 4940 4941 /* Initialize Tx FIFO's */ 4942 static void mvpp2_tx_fifo_init(struct mvpp2 *priv) 4943 { 4944 int port, val; 4945 4946 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 4947 /* Port 0 supports 10KB TX FIFO */ 4948 if (port == 0) { 4949 val = MVPP2_TX_FIFO_DATA_SIZE_10KB & 4950 MVPP22_TX_FIFO_SIZE_MASK; 4951 } else { 4952 val = MVPP2_TX_FIFO_DATA_SIZE_3KB & 4953 MVPP22_TX_FIFO_SIZE_MASK; 4954 } 4955 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), val); 4956 } 4957 } 4958 4959 static void mvpp2_axi_init(struct mvpp2 *priv) 4960 { 4961 u32 val, rdval, wrval; 4962 4963 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); 4964 4965 /* AXI Bridge Configuration */ 4966 4967 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE 4968 << MVPP22_AXI_ATTR_CACHE_OFFS; 4969 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4970 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 4971 4972 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE 4973 << MVPP22_AXI_ATTR_CACHE_OFFS; 4974 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4975 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 4976 4977 /* BM */ 4978 mvpp2_write(priv, 
MVPP22_AXI_BM_WR_ATTR_REG, wrval); 4979 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); 4980 4981 /* Descriptors */ 4982 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); 4983 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); 4984 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); 4985 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); 4986 4987 /* Buffer Data */ 4988 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); 4989 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); 4990 4991 val = MVPP22_AXI_CODE_CACHE_NON_CACHE 4992 << MVPP22_AXI_CODE_CACHE_OFFS; 4993 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM 4994 << MVPP22_AXI_CODE_DOMAIN_OFFS; 4995 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); 4996 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val); 4997 4998 val = MVPP22_AXI_CODE_CACHE_RD_CACHE 4999 << MVPP22_AXI_CODE_CACHE_OFFS; 5000 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 5001 << MVPP22_AXI_CODE_DOMAIN_OFFS; 5002 5003 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val); 5004 5005 val = MVPP22_AXI_CODE_CACHE_WR_CACHE 5006 << MVPP22_AXI_CODE_CACHE_OFFS; 5007 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 5008 << MVPP22_AXI_CODE_DOMAIN_OFFS; 5009 5010 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); 5011 } 5012 5013 /* Initialize network controller common part HW */ 5014 static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv) 5015 { 5016 const struct mbus_dram_target_info *dram_target_info; 5017 int err, i; 5018 u32 val; 5019 5020 /* Checks for hardware constraints (U-Boot uses only one rxq) */ 5021 if ((rxq_number > priv->max_port_rxqs) || 5022 (txq_number > MVPP2_MAX_TXQ)) { 5023 dev_err(&pdev->dev, "invalid queue size parameter\n"); 5024 return -EINVAL; 5025 } 5026 5027 /* MBUS windows configuration */ 5028 dram_target_info = mvebu_mbus_dram_info(); 5029 if (dram_target_info) 5030 mvpp2_conf_mbus_windows(dram_target_info, priv); 5031 5032 if (priv->hw_version == MVPP22) 5033 mvpp2_axi_init(priv); 
5034 5035 if (priv->hw_version == MVPP21) { 5036 /* Disable HW PHY polling */ 5037 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); 5038 val |= MVPP2_PHY_AN_STOP_SMI0_MASK; 5039 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); 5040 } else { 5041 /* Enable HW PHY polling */ 5042 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG); 5043 val |= MVPP22_SMI_POLLING_EN; 5044 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG); 5045 } 5046 5047 /* Allocate and initialize aggregated TXQs */ 5048 priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(), 5049 sizeof(struct mvpp2_tx_queue), 5050 GFP_KERNEL); 5051 if (!priv->aggr_txqs) 5052 return -ENOMEM; 5053 5054 for_each_present_cpu(i) { 5055 priv->aggr_txqs[i].id = i; 5056 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; 5057 err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i], 5058 MVPP2_AGGR_TXQ_SIZE, i, priv); 5059 if (err < 0) 5060 return err; 5061 } 5062 5063 /* Rx Fifo Init */ 5064 mvpp2_rx_fifo_init(priv); 5065 5066 /* Tx Fifo Init */ 5067 if (priv->hw_version == MVPP22) 5068 mvpp2_tx_fifo_init(priv); 5069 5070 /* Reset Rx queue group interrupt configuration */ 5071 for (i = 0; i < MVPP2_MAX_PORTS; i++) { 5072 if (priv->hw_version == MVPP21) { 5073 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i), 5074 CONFIG_MV_ETH_RXQ); 5075 continue; 5076 } else { 5077 u32 val; 5078 5079 val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET); 5080 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); 5081 5082 val = (CONFIG_MV_ETH_RXQ << 5083 MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET); 5084 mvpp2_write(priv, 5085 MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); 5086 } 5087 } 5088 5089 if (priv->hw_version == MVPP21) 5090 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, 5091 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); 5092 5093 /* Allow cache snoop when transmiting packets */ 5094 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); 5095 5096 /* Buffer Manager initialization */ 5097 err = mvpp2_bm_init(dev, priv); 5098 if (err < 0) 5099 
return err; 5100 5101 /* Parser default initialization */ 5102 err = mvpp2_prs_default_init(dev, priv); 5103 if (err < 0) 5104 return err; 5105 5106 /* Classifier default initialization */ 5107 mvpp2_cls_init(priv); 5108 5109 return 0; 5110 } 5111 5112 /* SMI / MDIO functions */ 5113 5114 static int smi_wait_ready(struct mvpp2 *priv) 5115 { 5116 u32 timeout = MVPP2_SMI_TIMEOUT; 5117 u32 smi_reg; 5118 5119 /* wait till the SMI is not busy */ 5120 do { 5121 /* read smi register */ 5122 smi_reg = readl(priv->mdio_base); 5123 if (timeout-- == 0) { 5124 printf("Error: SMI busy timeout\n"); 5125 return -EFAULT; 5126 } 5127 } while (smi_reg & MVPP2_SMI_BUSY); 5128 5129 return 0; 5130 } 5131 5132 /* 5133 * mpp2_mdio_read - miiphy_read callback function. 5134 * 5135 * Returns 16bit phy register value, or 0xffff on error 5136 */ 5137 static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg) 5138 { 5139 struct mvpp2 *priv = bus->priv; 5140 u32 smi_reg; 5141 u32 timeout; 5142 5143 /* check parameters */ 5144 if (addr > MVPP2_PHY_ADDR_MASK) { 5145 printf("Error: Invalid PHY address %d\n", addr); 5146 return -EFAULT; 5147 } 5148 5149 if (reg > MVPP2_PHY_REG_MASK) { 5150 printf("Err: Invalid register offset %d\n", reg); 5151 return -EFAULT; 5152 } 5153 5154 /* wait till the SMI is not busy */ 5155 if (smi_wait_ready(priv) < 0) 5156 return -EFAULT; 5157 5158 /* fill the phy address and regiser offset and read opcode */ 5159 smi_reg = (addr << MVPP2_SMI_DEV_ADDR_OFFS) 5160 | (reg << MVPP2_SMI_REG_ADDR_OFFS) 5161 | MVPP2_SMI_OPCODE_READ; 5162 5163 /* write the smi register */ 5164 writel(smi_reg, priv->mdio_base); 5165 5166 /* wait till read value is ready */ 5167 timeout = MVPP2_SMI_TIMEOUT; 5168 5169 do { 5170 /* read smi register */ 5171 smi_reg = readl(priv->mdio_base); 5172 if (timeout-- == 0) { 5173 printf("Err: SMI read ready timeout\n"); 5174 return -EFAULT; 5175 } 5176 } while (!(smi_reg & MVPP2_SMI_READ_VALID)); 5177 5178 /* Wait for the data to update 
in the SMI register */ 5179 for (timeout = 0; timeout < MVPP2_SMI_TIMEOUT; timeout++) 5180 ; 5181 5182 return readl(priv->mdio_base) & MVPP2_SMI_DATA_MASK; 5183 } 5184 5185 /* 5186 * mpp2_mdio_write - miiphy_write callback function. 5187 * 5188 * Returns 0 if write succeed, -EINVAL on bad parameters 5189 * -ETIME on timeout 5190 */ 5191 static int mpp2_mdio_write(struct mii_dev *bus, int addr, int devad, int reg, 5192 u16 value) 5193 { 5194 struct mvpp2 *priv = bus->priv; 5195 u32 smi_reg; 5196 5197 /* check parameters */ 5198 if (addr > MVPP2_PHY_ADDR_MASK) { 5199 printf("Error: Invalid PHY address %d\n", addr); 5200 return -EFAULT; 5201 } 5202 5203 if (reg > MVPP2_PHY_REG_MASK) { 5204 printf("Err: Invalid register offset %d\n", reg); 5205 return -EFAULT; 5206 } 5207 5208 /* wait till the SMI is not busy */ 5209 if (smi_wait_ready(priv) < 0) 5210 return -EFAULT; 5211 5212 /* fill the phy addr and reg offset and write opcode and data */ 5213 smi_reg = value << MVPP2_SMI_DATA_OFFS; 5214 smi_reg |= (addr << MVPP2_SMI_DEV_ADDR_OFFS) 5215 | (reg << MVPP2_SMI_REG_ADDR_OFFS); 5216 smi_reg &= ~MVPP2_SMI_OPCODE_READ; 5217 5218 /* write the smi register */ 5219 writel(smi_reg, priv->mdio_base); 5220 5221 return 0; 5222 } 5223 5224 static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp) 5225 { 5226 struct mvpp2_port *port = dev_get_priv(dev); 5227 struct mvpp2_rx_desc *rx_desc; 5228 struct mvpp2_bm_pool *bm_pool; 5229 dma_addr_t dma_addr; 5230 u32 bm, rx_status; 5231 int pool, rx_bytes, err; 5232 int rx_received; 5233 struct mvpp2_rx_queue *rxq; 5234 u32 cause_rx_tx, cause_rx, cause_misc; 5235 u8 *data; 5236 5237 cause_rx_tx = mvpp2_read(port->priv, 5238 MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); 5239 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; 5240 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; 5241 if (!cause_rx_tx && !cause_misc) 5242 return 0; 5243 5244 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; 5245 5246 /* Process RX packets 
*/ 5247 cause_rx |= port->pending_cause_rx; 5248 rxq = mvpp2_get_rx_queue(port, cause_rx); 5249 5250 /* Get number of received packets and clamp the to-do */ 5251 rx_received = mvpp2_rxq_received(port, rxq->id); 5252 5253 /* Return if no packets are received */ 5254 if (!rx_received) 5255 return 0; 5256 5257 rx_desc = mvpp2_rxq_next_desc_get(rxq); 5258 rx_status = mvpp2_rxdesc_status_get(port, rx_desc); 5259 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); 5260 rx_bytes -= MVPP2_MH_SIZE; 5261 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); 5262 5263 bm = mvpp2_bm_cookie_build(port, rx_desc); 5264 pool = mvpp2_bm_cookie_pool_get(bm); 5265 bm_pool = &port->priv->bm_pools[pool]; 5266 5267 /* In case of an error, release the requested buffer pointer 5268 * to the Buffer Manager. This request process is controlled 5269 * by the hardware, and the information about the buffer is 5270 * comprised by the RX descriptor. 5271 */ 5272 if (rx_status & MVPP2_RXD_ERR_SUMMARY) { 5273 mvpp2_rx_error(port, rx_desc); 5274 /* Return the buffer to the pool */ 5275 mvpp2_pool_refill(port, bm, dma_addr, dma_addr); 5276 return 0; 5277 } 5278 5279 err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr); 5280 if (err) { 5281 netdev_err(port->dev, "failed to refill BM pools\n"); 5282 return 0; 5283 } 5284 5285 /* Update Rx queue management counters */ 5286 mb(); 5287 mvpp2_rxq_status_update(port, rxq->id, 1, 1); 5288 5289 /* give packet to stack - skip on first n bytes */ 5290 data = (u8 *)dma_addr + 2 + 32; 5291 5292 if (rx_bytes <= 0) 5293 return 0; 5294 5295 /* 5296 * No cache invalidation needed here, since the rx_buffer's are 5297 * located in a uncached memory region 5298 */ 5299 *packetp = data; 5300 5301 return rx_bytes; 5302 } 5303 5304 /* Drain Txq */ 5305 static void mvpp2_txq_drain(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, 5306 int enable) 5307 { 5308 u32 val; 5309 5310 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 5311 val = mvpp2_read(port->priv, 
MVPP2_TXQ_PREF_BUF_REG); 5312 if (enable) 5313 val |= MVPP2_TXQ_DRAIN_EN_MASK; 5314 else 5315 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 5316 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); 5317 } 5318 5319 static int mvpp2_send(struct udevice *dev, void *packet, int length) 5320 { 5321 struct mvpp2_port *port = dev_get_priv(dev); 5322 struct mvpp2_tx_queue *txq, *aggr_txq; 5323 struct mvpp2_tx_desc *tx_desc; 5324 int tx_done; 5325 int timeout; 5326 5327 txq = port->txqs[0]; 5328 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; 5329 5330 /* Get a descriptor for the first part of the packet */ 5331 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 5332 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 5333 mvpp2_txdesc_size_set(port, tx_desc, length); 5334 mvpp2_txdesc_offset_set(port, tx_desc, 5335 (dma_addr_t)packet & MVPP2_TX_DESC_ALIGN); 5336 mvpp2_txdesc_dma_addr_set(port, tx_desc, 5337 (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN); 5338 /* First and Last descriptor */ 5339 mvpp2_txdesc_cmd_set(port, tx_desc, 5340 MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE 5341 | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC); 5342 5343 /* Flush tx data */ 5344 flush_dcache_range((unsigned long)packet, 5345 (unsigned long)packet + ALIGN(length, PKTALIGN)); 5346 5347 /* Enable transmit */ 5348 mb(); 5349 mvpp2_aggr_txq_pend_desc_add(port, 1); 5350 5351 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 5352 5353 timeout = 0; 5354 do { 5355 if (timeout++ > 10000) { 5356 printf("timeout: packet not sent from aggregated to phys TXQ\n"); 5357 return 0; 5358 } 5359 tx_done = mvpp2_txq_pend_desc_num_get(port, txq); 5360 } while (tx_done); 5361 5362 /* Enable TXQ drain */ 5363 mvpp2_txq_drain(port, txq, 1); 5364 5365 timeout = 0; 5366 do { 5367 if (timeout++ > 10000) { 5368 printf("timeout: packet not sent\n"); 5369 return 0; 5370 } 5371 tx_done = mvpp2_txq_sent_desc_proc(port, txq); 5372 } while (!tx_done); 5373 5374 /* Disable TXQ drain */ 5375 mvpp2_txq_drain(port, txq, 0); 5376 5377 return 0; 
5378 } 5379 5380 static int mvpp2_start(struct udevice *dev) 5381 { 5382 struct eth_pdata *pdata = dev_get_platdata(dev); 5383 struct mvpp2_port *port = dev_get_priv(dev); 5384 5385 /* Load current MAC address */ 5386 memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN); 5387 5388 /* Reconfigure parser accept the original MAC address */ 5389 mvpp2_prs_update_mac_da(port, port->dev_addr); 5390 5391 switch (port->phy_interface) { 5392 case PHY_INTERFACE_MODE_RGMII: 5393 case PHY_INTERFACE_MODE_RGMII_ID: 5394 case PHY_INTERFACE_MODE_SGMII: 5395 mvpp2_port_power_up(port); 5396 default: 5397 break; 5398 } 5399 5400 mvpp2_open(dev, port); 5401 5402 return 0; 5403 } 5404 5405 static void mvpp2_stop(struct udevice *dev) 5406 { 5407 struct mvpp2_port *port = dev_get_priv(dev); 5408 5409 mvpp2_stop_dev(port); 5410 mvpp2_cleanup_rxqs(port); 5411 mvpp2_cleanup_txqs(port); 5412 } 5413 5414 static int mvpp22_smi_phy_addr_cfg(struct mvpp2_port *port) 5415 { 5416 writel(port->phyaddr, port->priv->iface_base + 5417 MVPP22_SMI_PHY_ADDR_REG(port->gop_id)); 5418 5419 return 0; 5420 } 5421 5422 static int mvpp2_base_probe(struct udevice *dev) 5423 { 5424 struct mvpp2 *priv = dev_get_priv(dev); 5425 struct mii_dev *bus; 5426 void *bd_space; 5427 u32 size = 0; 5428 int i; 5429 5430 /* Save hw-version */ 5431 priv->hw_version = dev_get_driver_data(dev); 5432 5433 /* 5434 * U-Boot special buffer handling: 5435 * 5436 * Allocate buffer area for descs and rx_buffers. This is only 5437 * done once for all interfaces. As only one interface can 5438 * be active. 
Make this area DMA-safe by disabling the D-cache 5439 */ 5440 5441 /* Align buffer area for descs and rx_buffers to 1MiB */ 5442 bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE); 5443 mmu_set_region_dcache_behaviour((unsigned long)bd_space, 5444 BD_SPACE, DCACHE_OFF); 5445 5446 buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space; 5447 size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE; 5448 5449 buffer_loc.tx_descs = 5450 (struct mvpp2_tx_desc *)((unsigned long)bd_space + size); 5451 size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE; 5452 5453 buffer_loc.rx_descs = 5454 (struct mvpp2_rx_desc *)((unsigned long)bd_space + size); 5455 size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE; 5456 5457 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { 5458 buffer_loc.bm_pool[i] = 5459 (unsigned long *)((unsigned long)bd_space + size); 5460 if (priv->hw_version == MVPP21) 5461 size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32); 5462 else 5463 size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64); 5464 } 5465 5466 for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) { 5467 buffer_loc.rx_buffer[i] = 5468 (unsigned long *)((unsigned long)bd_space + size); 5469 size += RX_BUFFER_SIZE; 5470 } 5471 5472 /* Clear the complete area so that all descriptors are cleared */ 5473 memset(bd_space, 0, size); 5474 5475 /* Save base addresses for later use */ 5476 priv->base = (void *)devfdt_get_addr_index(dev, 0); 5477 if (IS_ERR(priv->base)) 5478 return PTR_ERR(priv->base); 5479 5480 if (priv->hw_version == MVPP21) { 5481 priv->lms_base = (void *)devfdt_get_addr_index(dev, 1); 5482 if (IS_ERR(priv->lms_base)) 5483 return PTR_ERR(priv->lms_base); 5484 5485 priv->mdio_base = priv->lms_base + MVPP21_SMI; 5486 } else { 5487 priv->iface_base = (void *)devfdt_get_addr_index(dev, 1); 5488 if (IS_ERR(priv->iface_base)) 5489 return PTR_ERR(priv->iface_base); 5490 5491 priv->mdio_base = priv->iface_base + MVPP22_SMI; 5492 5493 /* Store common base addresses for all ports */ 5494 priv->mpcs_base = 
priv->iface_base + MVPP22_MPCS; 5495 priv->xpcs_base = priv->iface_base + MVPP22_XPCS; 5496 priv->rfu1_base = priv->iface_base + MVPP22_RFU1; 5497 } 5498 5499 if (priv->hw_version == MVPP21) 5500 priv->max_port_rxqs = 8; 5501 else 5502 priv->max_port_rxqs = 32; 5503 5504 /* Finally create and register the MDIO bus driver */ 5505 bus = mdio_alloc(); 5506 if (!bus) { 5507 printf("Failed to allocate MDIO bus\n"); 5508 return -ENOMEM; 5509 } 5510 5511 bus->read = mpp2_mdio_read; 5512 bus->write = mpp2_mdio_write; 5513 snprintf(bus->name, sizeof(bus->name), dev->name); 5514 bus->priv = (void *)priv; 5515 priv->bus = bus; 5516 5517 return mdio_register(bus); 5518 } 5519 5520 static int mvpp2_probe(struct udevice *dev) 5521 { 5522 struct mvpp2_port *port = dev_get_priv(dev); 5523 struct mvpp2 *priv = dev_get_priv(dev->parent); 5524 int err; 5525 5526 /* Only call the probe function for the parent once */ 5527 if (!priv->probe_done) { 5528 err = mvpp2_base_probe(dev->parent); 5529 priv->probe_done = 1; 5530 } 5531 5532 port->priv = dev_get_priv(dev->parent); 5533 5534 err = phy_info_parse(dev, port); 5535 if (err) 5536 return err; 5537 5538 /* 5539 * We need the port specific io base addresses at this stage, since 5540 * gop_port_init() accesses these registers 5541 */ 5542 if (priv->hw_version == MVPP21) { 5543 int priv_common_regs_num = 2; 5544 5545 port->base = (void __iomem *)devfdt_get_addr_index( 5546 dev->parent, priv_common_regs_num + port->id); 5547 if (IS_ERR(port->base)) 5548 return PTR_ERR(port->base); 5549 } else { 5550 port->gop_id = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev), 5551 "gop-port-id", -1); 5552 if (port->id == -1) { 5553 dev_err(&pdev->dev, "missing gop-port-id value\n"); 5554 return -EINVAL; 5555 } 5556 5557 port->base = priv->iface_base + MVPP22_PORT_BASE + 5558 port->gop_id * MVPP22_PORT_OFFSET; 5559 5560 /* Set phy address of the port */ 5561 if(port->phy_node) 5562 mvpp22_smi_phy_addr_cfg(port); 5563 5564 /* GoP Init */ 5565 
gop_port_init(port); 5566 } 5567 5568 /* Initialize network controller */ 5569 err = mvpp2_init(dev, priv); 5570 if (err < 0) { 5571 dev_err(&pdev->dev, "failed to initialize controller\n"); 5572 return err; 5573 } 5574 5575 err = mvpp2_port_probe(dev, port, dev_of_offset(dev), priv); 5576 if (err) 5577 return err; 5578 5579 if (priv->hw_version == MVPP22) { 5580 priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id, 5581 port->phy_interface); 5582 5583 /* Netcomplex configurations for all ports */ 5584 gop_netc_init(priv, MV_NETC_FIRST_PHASE); 5585 gop_netc_init(priv, MV_NETC_SECOND_PHASE); 5586 } 5587 5588 return 0; 5589 } 5590 5591 /* 5592 * Empty BM pool and stop its activity before the OS is started 5593 */ 5594 static int mvpp2_remove(struct udevice *dev) 5595 { 5596 struct mvpp2_port *port = dev_get_priv(dev); 5597 struct mvpp2 *priv = port->priv; 5598 int i; 5599 5600 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) 5601 mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]); 5602 5603 return 0; 5604 } 5605 5606 static const struct eth_ops mvpp2_ops = { 5607 .start = mvpp2_start, 5608 .send = mvpp2_send, 5609 .recv = mvpp2_recv, 5610 .stop = mvpp2_stop, 5611 }; 5612 5613 static struct driver mvpp2_driver = { 5614 .name = "mvpp2", 5615 .id = UCLASS_ETH, 5616 .probe = mvpp2_probe, 5617 .remove = mvpp2_remove, 5618 .ops = &mvpp2_ops, 5619 .priv_auto_alloc_size = sizeof(struct mvpp2_port), 5620 .platdata_auto_alloc_size = sizeof(struct eth_pdata), 5621 .flags = DM_FLAG_ACTIVE_DMA, 5622 }; 5623 5624 /* 5625 * Use a MISC device to bind the n instances (child nodes) of the 5626 * network base controller in UCLASS_ETH. 
5627 */ 5628 static int mvpp2_base_bind(struct udevice *parent) 5629 { 5630 const void *blob = gd->fdt_blob; 5631 int node = dev_of_offset(parent); 5632 struct uclass_driver *drv; 5633 struct udevice *dev; 5634 struct eth_pdata *plat; 5635 char *name; 5636 int subnode; 5637 u32 id; 5638 int base_id_add; 5639 5640 /* Lookup eth driver */ 5641 drv = lists_uclass_lookup(UCLASS_ETH); 5642 if (!drv) { 5643 puts("Cannot find eth driver\n"); 5644 return -ENOENT; 5645 } 5646 5647 base_id_add = base_id; 5648 5649 fdt_for_each_subnode(subnode, blob, node) { 5650 /* Increment base_id for all subnodes, also the disabled ones */ 5651 base_id++; 5652 5653 /* Skip disabled ports */ 5654 if (!fdtdec_get_is_enabled(blob, subnode)) 5655 continue; 5656 5657 plat = calloc(1, sizeof(*plat)); 5658 if (!plat) 5659 return -ENOMEM; 5660 5661 id = fdtdec_get_int(blob, subnode, "port-id", -1); 5662 id += base_id_add; 5663 5664 name = calloc(1, 16); 5665 sprintf(name, "mvpp2-%d", id); 5666 5667 /* Create child device UCLASS_ETH and bind it */ 5668 device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev); 5669 dev_set_of_offset(dev, subnode); 5670 } 5671 5672 return 0; 5673 } 5674 5675 static const struct udevice_id mvpp2_ids[] = { 5676 { 5677 .compatible = "marvell,armada-375-pp2", 5678 .data = MVPP21, 5679 }, 5680 { 5681 .compatible = "marvell,armada-7k-pp22", 5682 .data = MVPP22, 5683 }, 5684 { } 5685 }; 5686 5687 U_BOOT_DRIVER(mvpp2_base) = { 5688 .name = "mvpp2_base", 5689 .id = UCLASS_MISC, 5690 .of_match = mvpp2_ids, 5691 .bind = mvpp2_base_bind, 5692 .priv_auto_alloc_size = sizeof(struct mvpp2), 5693 }; 5694