/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * U-Boot version:
 * Copyright (C) 2016-2017 Stefan Roese <sr@denx.de>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <common.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>
#include <asm-generic/gpio.h>
#include <fdt_support.h>

DECLARE_GLOBAL_DATA_PTR;

/* Some linux -> U-Boot compatibility stuff */
#define netdev_err(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_warn(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_info(dev, fmt, args...)		\
	printf(fmt, ##args)
#define netdev_dbg(dev, fmt, args...)		\
	printf(fmt, ##args)

#define ETH_ALEN	6		/* Octets in one ethernet addr */

#define __verify_pcpu_ptr(ptr)						\
do {									\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)

#define VERIFY_PERCPU_PTR(__p)						\
({									\
	__verify_pcpu_ptr(__p);						\
	(typeof(*(__p)) __kernel __force *)(__p);			\
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define smp_processor_id()	0
#define num_present_cpus()	1
#define for_each_present_cpu(cpu)			\
	for ((cpu) = 0; (cpu) < 1; (cpu)++)

#define NET_SKB_PAD	max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define CONFIG_NR_CPUS		1
#define ETH_HLEN		ETHER_HDR_SIZE	/* Total octets in header */

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP			(2 + ETH_HLEN + 4 + 32)
#define MTU			1500
#define RX_BUFFER_SIZE		(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
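/*
 * Illustrative sizing, not from the original sources: with ETH_HLEN = 14,
 * WRAP works out to 2 + 14 + 4 + 32 = 52 bytes, so RX_BUFFER_SIZE is
 * ALIGN(1500 + 52, ARCH_DMA_MINALIGN) -- e.g. 1568 bytes for a 32-byte
 * DMA alignment, or 1600 bytes for a 64-byte one.
 */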
#define MVPP2_SMI_TIMEOUT			10000

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP22_DESC_ADDR_OFFS			8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_THRESH_REG			0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET		16
#define MVPP2_TRANSMITTED_THRESH_MASK		0x3fff0000
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS		8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12

#define MVPP22_AXI_CODE_CACHE_OFFS		0
#define MVPP22_AXI_CODE_DOMAIN_OFFS		4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP21_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG			0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK	0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK		0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET		7

#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG		0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK		0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK		0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET		8
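/*
 * Sketch of how the PPv2.2 RXQ grouping registers above are typically
 * programmed (illustrative only; "port", "first_rxq" and "nrxqs" are
 * assumed placeholders, not code from this file):
 *
 *	val = port << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
 *	mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
 *	val = first_rxq | (nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
 *	mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
 */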
#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP2_BM_ADDR_HIGH_ALLOC		0x6444
#define MVPP2_BM_ADDR_HIGH_PHYS_MASK		0xff
#define MVPP2_BM_ADDR_HIGH_VIRT_MASK		0xff00
#define MVPP2_BM_ADDR_HIGH_VIRT_SHIFT		8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP21_BM_MC_RLS_REG			0x64c4
#define MVPP2_BM_MC_ID_MASK			0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
#define MVPP22_BM_MC_RLS_REG			0x64d4

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_PORT_TYPE_MASK		BIT(1)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_SGMII_MODE_MASK		BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define MVPP2_GMAC_PORT_DIS_PADING_MASK		BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_CLK_125_BYPS_EN_MASK		BIT(9)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_EN_PCS_AN			BIT(2)
#define MVPP2_GMAC_AN_BYPASS_EN			BIT(3)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_EN_FC_AN			BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG	BIT(15)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP2_GMAC_CTRL_4_REG			0x90
#define MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK	BIT(0)
#define MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK	BIT(5)
#define MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK	BIT(6)
#define MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK	BIT(7)
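/*
 * Illustrative use of the autoneg-config bits above (a sketch, not code
 * taken from this driver): forcing a 1 Gbps full-duplex link with speed
 * and duplex autonegotiation disabled amounts to something like
 *
 *	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
 *	val &= ~(MVPP2_GMAC_FORCE_LINK_DOWN | MVPP2_GMAC_AN_SPEED_EN |
 *		 MVPP2_GMAC_AN_DUPLEX_EN);
 *	val |= MVPP2_GMAC_FORCE_LINK_PASS | MVPP2_GMAC_CONFIG_GMII_SPEED |
 *	       MVPP2_GMAC_CONFIG_FULL_DUPLEX;
 *	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
 */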
/*
 * Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */

/* Port Mac Control0 */
#define MVPP22_XLG_CTRL0_REG			0x100
#define MVPP22_XLG_PORT_EN			BIT(0)
#define MVPP22_XLG_MAC_RESETN			BIT(1)
#define MVPP22_XLG_RX_FC_EN			BIT(7)
#define MVPP22_XLG_MIBCNT_DIS			BIT(13)
/* Port Mac Control1 */
#define MVPP22_XLG_CTRL1_REG			0x104
#define MVPP22_XLG_MAX_RX_SIZE_OFFS		0
#define MVPP22_XLG_MAX_RX_SIZE_MASK		0x1fff
/* Port Interrupt Mask */
#define MVPP22_XLG_INTERRUPT_MASK_REG		0x118
#define MVPP22_XLG_INTERRUPT_LINK_CHANGE	BIT(1)
/* Port Mac Control3 */
#define MVPP22_XLG_CTRL3_REG			0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC	(1 << 13)
/* Port Mac Control4 */
#define MVPP22_XLG_CTRL4_REG			0x184
#define MVPP22_XLG_FORWARD_802_3X_FC_EN		BIT(5)
#define MVPP22_XLG_FORWARD_PFC_EN		BIT(6)
#define MVPP22_XLG_MODE_DMA_1G			BIT(12)
#define MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK	BIT(14)

/* XPCS registers */

/* Global Configuration 0 */
#define MVPP22_XPCS_GLOBAL_CFG_0_REG		0x0
#define MVPP22_XPCS_PCSRESET			BIT(0)
#define MVPP22_XPCS_PCSMODE_OFFS		3
#define MVPP22_XPCS_PCSMODE_MASK		(0x3 << \
						 MVPP22_XPCS_PCSMODE_OFFS)
#define MVPP22_XPCS_LANEACTIVE_OFFS		5
#define MVPP22_XPCS_LANEACTIVE_MASK		(0x3 << \
						 MVPP22_XPCS_LANEACTIVE_OFFS)

/* MPCS registers */

#define PCS40G_COMMON_CONTROL			0x14
#define FORWARD_ERROR_CORRECTION_MASK		BIT(10)

#define PCS_CLOCK_RESET				0x14c
#define TX_SD_CLK_RESET_MASK			BIT(0)
#define RX_SD_CLK_RESET_MASK			BIT(1)
#define MAC_CLK_RESET_MASK			BIT(2)
#define CLK_DIVISION_RATIO_OFFS			4
#define CLK_DIVISION_RATIO_MASK			(0x7 << CLK_DIVISION_RATIO_OFFS)
#define CLK_DIV_PHASE_SET_MASK			BIT(11)

/* System Soft Reset 1 */
#define GOP_SOFT_RESET_1_REG			0x108
#define NETC_GOP_SOFT_RESET_OFFS		6
#define NETC_GOP_SOFT_RESET_MASK		(0x1 << \
						 NETC_GOP_SOFT_RESET_OFFS)

/* Ports Control 0 */
#define NETCOMP_PORTS_CONTROL_0_REG		0x110
#define NETC_BUS_WIDTH_SELECT_OFFS		1
#define NETC_BUS_WIDTH_SELECT_MASK		(0x1 << \
						 NETC_BUS_WIDTH_SELECT_OFFS)
#define NETC_GIG_RX_DATA_SAMPLE_OFFS		29
#define NETC_GIG_RX_DATA_SAMPLE_MASK		(0x1 << \
						 NETC_GIG_RX_DATA_SAMPLE_OFFS)
#define NETC_CLK_DIV_PHASE_OFFS			31
#define NETC_CLK_DIV_PHASE_MASK			(0x1 << NETC_CLK_DIV_PHASE_OFFS)
/* Ports Control 1 */
#define NETCOMP_PORTS_CONTROL_1_REG		0x114
#define NETC_PORTS_ACTIVE_OFFSET(p)		(0 + (p))
#define NETC_PORTS_ACTIVE_MASK(p)		(0x1 << \
						 NETC_PORTS_ACTIVE_OFFSET(p))
#define NETC_PORT_GIG_RF_RESET_OFFS(p)		(28 + (p))
#define NETC_PORT_GIG_RF_RESET_MASK(p)		(0x1 << \
						 NETC_PORT_GIG_RF_RESET_OFFS(p))
#define NETCOMP_CONTROL_0_REG			0x120
#define NETC_GBE_PORT0_SGMII_MODE_OFFS		0
#define NETC_GBE_PORT0_SGMII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT0_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_SGMII_MODE_OFFS		1
#define NETC_GBE_PORT1_SGMII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT1_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_MII_MODE_OFFS		2
#define NETC_GBE_PORT1_MII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT1_MII_MODE_OFFS)

#define MVPP22_SMI_MISC_CFG_REG			(MVPP22_SMI + 0x04)
#define MVPP22_SMI_POLLING_EN			BIT(10)

#define MVPP22_SMI_PHY_ADDR_REG(port)		(MVPP22_SMI + 0x04 + \
						 (0x4 * (port)))

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff
/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index)		\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* SMI: 0xc0054 -> offset 0x54 to lms_base */
#define MVPP21_SMI				0x0054
/* PP2.2: SMI: 0x12a200 -> offset 0x1200 to iface_base */
#define MVPP22_SMI				0x1200
/* SMI register fields */
#define MVPP2_SMI_DATA_OFFS			0	/* Data */
#define MVPP2_SMI_DATA_MASK			(0xffff << MVPP2_SMI_DATA_OFFS)
#define MVPP2_SMI_DEV_ADDR_OFFS			16	/* PHY device address */
#define MVPP2_SMI_REG_ADDR_OFFS			21	/* PHY device reg addr */
#define MVPP2_SMI_OPCODE_OFFS			26	/* Write/Read opcode */
#define MVPP2_SMI_OPCODE_READ			(1 << MVPP2_SMI_OPCODE_OFFS)
#define MVPP2_SMI_READ_VALID			(1 << 27)	/* Read Valid */
#define MVPP2_SMI_BUSY				(1 << 28)	/* Busy */

#define MVPP2_PHY_ADDR_MASK			0x1f
#define MVPP2_PHY_REG_MASK			0x1f
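/*
 * Illustrative SMI transaction using the fields above (a sketch, not code
 * from this file): reading register "reg" of the PHY at "addr" means
 * waiting for MVPP2_SMI_BUSY to clear, writing
 *
 *	(addr << MVPP2_SMI_DEV_ADDR_OFFS) |
 *	(reg << MVPP2_SMI_REG_ADDR_OFFS) | MVPP2_SMI_OPCODE_READ
 *
 * to the SMI register, polling for MVPP2_SMI_READ_VALID, and then
 * extracting the result with MVPP2_SMI_DATA_MASK.
 */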
/* Additional PPv2.2 offsets */
#define MVPP22_MPCS				0x007000
#define MVPP22_XPCS				0x007400
#define MVPP22_PORT_BASE			0x007e00
#define MVPP22_PORT_OFFSET			0x001000
#define MVPP22_RFU1				0x318000

/* Maximum number of ports */
#define MVPP22_GOP_MAC_NUM			4

/* TX FIFO minimum threshold values, per interface mode */
#define MVPP2_RGMII_TX_FIFO_MIN_TH		0x41
#define MVPP2_SGMII_TX_FIFO_MIN_TH		0x5
#define MVPP2_SGMII2_5_TX_FIFO_MIN_TH		0xb

/* Net Complex */
enum mv_netc_topology {
	MV_NETC_GE_MAC2_SGMII		= BIT(0),
	MV_NETC_GE_MAC3_SGMII		= BIT(1),
	MV_NETC_GE_MAC3_RGMII		= BIT(2),
};

enum mv_netc_phase {
	MV_NETC_FIRST_PHASE,
	MV_NETC_SECOND_PHASE,
};

enum mv_netc_sgmii_xmi_mode {
	MV_NETC_GBE_SGMII,
	MV_NETC_GBE_XMII,
};

enum mv_netc_mii_mode {
	MV_NETC_GBE_RGMII,
	MV_NETC_GBE_MII,
};

enum mv_netc_lanes {
	MV_NETC_LANE_23,
	MV_NETC_LANE_45,
};

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* Default number of TXQs in use */
#define MVPP2_DEFAULT_TXQ		1

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		1
#define CONFIG_MV_ETH_RXQ		8	/* increment by 8 */

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD			16

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD			16

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK		16

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		16

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP21_RX_FIFO_PORT_DATA_SIZE		0x2000
#define MVPP21_RX_FIFO_PORT_ATTR_SIZE		0x80
#define MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE	0x8000
#define MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE	0x2000
#define MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE	0x1000
#define MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE	0x200
#define MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE	0x80
#define MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE	0x40
#define MVPP2_RX_FIFO_PORT_MIN_PKT		0x80

/* TX general registers */
#define MVPP22_TX_FIFO_SIZE_REG(eth_tx_port)	(0x8860 + ((eth_tx_port) << 2))
#define MVPP22_TX_FIFO_SIZE_MASK		0xf

/* TX FIFO constants */
#define MVPP2_TX_FIFO_DATA_SIZE_10KB		0xa
#define MVPP2_TX_FIFO_DATA_SIZE_3KB		0x3

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	0

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)

#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16

/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH   = 1,
	MVPP2_TAG_TYPE_DSA  = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS		8
#define MVPP2_PRS_PORT_MASK		0xff
#define MVPP2_PRS_LU_MASK		0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)		\
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
	(((offs) * 2) - ((offs) % 2) + 2)
#define MVPP2_PRS_TCAM_AI_BYTE		16
#define MVPP2_PRS_TCAM_PORT_BYTE	17
#define MVPP2_PRS_TCAM_LU_BYTE		20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)	((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD		5
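/*
 * Worked example for the byte-swizzling macros above (illustrative):
 * each 32-bit TCAM word holds two data bytes and their two enable
 * bytes, so header-data offsets 0, 1, 2, 3, ... map to entry bytes
 * 0, 1, 4, 5, ... via MVPP2_PRS_TCAM_DATA_BYTE(), while the matching
 * enable bytes land at 2, 3, 6, 7, ... via MVPP2_PRS_TCAM_DATA_BYTE_EN().
 */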
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64

/* BM constants */
#define MVPP2_BM_POOLS_NUM		1
#define MVPP2_BM_LONG_BUF_NUM		16
#define MVPP2_BM_SHORT_BUF_NUM		16
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	0

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

/* BM short pool packet size
 * These values assure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
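/*
 * Illustrative arithmetic (not from the original sources): with
 * NET_SKB_PAD = 32 and MVPP2_SKB_SHINFO_SIZE = 0 in this U-Boot port,
 * MVPP2_BM_SHORT_PKT_SIZE = MVPP2_RX_MAX_PKT_SIZE(512) evaluates to
 * 512 - 32 - 0 = 480 bytes of usable packet data per 512-byte buffer.
 */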
enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;
	void __iomem *iface_base;
	void __iomem *mdio_base;

	void __iomem *mpcs_base;
	void __iomem *xpcs_base;
	void __iomem *rfu1_base;

	u32 netc_config;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	struct mii_dev *bus;

	int probe_done;
	u8 num_ports;
};

struct mvpp2_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;

	int pkt_size;

	u32 pending_cause_rx;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	int phy_node;
	int phyaddr;
#ifdef CONFIG_DM_GPIO
	struct gpio_desc phy_reset_gpio;
	struct gpio_desc phy_tx_disable_gpio;
#endif
	int init;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	unsigned int phy_speed;		/* SGMII 1Gbps vs 2.5Gbps */

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	u8 dev_addr[ETH_ALEN];
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and receive DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};
/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptors in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};
struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	unsigned long *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_DEFAULT_TXQ;

static int base_id;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/*
 * U-Boot internal data, mostly uncached buffers for descriptors and data
 */
struct buffer_location {
	struct mvpp2_tx_desc *aggr_tx_descs;
	struct mvpp2_tx_desc *tx_descs;
	struct mvpp2_rx_desc *rx_descs;
	unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
	unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
	int first_rxq;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set to 1MB, or multiples of 1MB (not < 1MB).
 * The driver needs far less buffer-descriptor space than that, so 1MB
 * is sufficient.
 */
#define BD_SPACE	(1 << 20)

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}
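/*
 * On PPv2.2 the buffer DMA address shares a 64-bit descriptor field with
 * PTP metadata (hence the buf_dma_addr_ptp name), which is why only the
 * bits covered by GENMASK_ULL(40, 0) are replaced above and the remaining
 * upper bits are preserved.
 */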
static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}
/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}
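/*
 * Note that the per-port enable bits in the TCAM are active-low: a
 * cleared bit means the entry matches that port, which is why the port
 * map is inverted on write and read above. For example,
 * mvpp2_prs_tcam_port_map_set(pe, MVPP2_PRS_PORT_MASK) makes an entry
 * match every port, while a map of 0 masks the entry from all ports.
 */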
/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	/* The AI field straddles a byte boundary, so combine the high
	 * bits of one byte with the low bits of the next
	 */
	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}

/* In sram sw entry set lookup ID field of the tcam key to be used in the
 * next lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
		(8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}

/* Enable/disable dropping all MAC DAs */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exists - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
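/*
 * Illustration (standard Ethernet addressing, not driver-specific):
 * IPv4 multicast DAs start with 01:00:5e:xx:xx:xx and IPv6 multicast
 * DAs with 33:33:xx:xx:xx:xx, so matching only the first byte (0x01
 * or 0x33) with an 0xff enable mask is enough to classify L2
 * multicast here.
 */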
/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}

/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}

/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}

/* Set default entries (placeholders) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* place holders only - no ports */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, false);
	mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false);
	mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false);
}
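/*
 * Resulting default lookup chain (a sketch of what the init helpers
 * above program): every packet starts at MVPP2_PRS_LU_MH, which skips
 * the Marvell header (MVPP2_MH_SIZE) and hands over to
 * MVPP2_PRS_LU_MAC; the MAC entries shift past the 2 * ETH_ALEN
 * DA/SA bytes towards MVPP2_PRS_LU_DSA and later lookups, until a
 * flow entry sets the LU_DONE/LU_GEN bits and reports a flow ID to
 * the classifier.
 */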
/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even if it's an unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Parser default initialization */
static int mvpp2_prs_default_init(struct udevice *dev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(struct mvpp2_prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	return 0;
}

/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}

/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}
	kfree(pe);

	return NULL;
}
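/*
 * Example of the comparison above (illustrative values): with
 * da = 00:50:43:12:34:56 and mask = ff:ff:ff:ff:ff:ff, only a TCAM
 * entry whose six enable bytes are all 0xff and whose data bytes
 * equal the DA exactly is reported as a match; a single differing
 * enable (mask) byte is already enough to reject the entry.
 */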
/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
				   const u8 *da, bool add)
{
	struct mvpp2_prs_entry *pe;
	unsigned int pmap, len, ri;
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry */
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
			     MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -1;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}

static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
{
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	memcpy(port->dev_addr, da, ETH_ALEN);

	return 0;
}
/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* No such entry exists - create a new one */
	if (!pe) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}

/* Classifier configuration routines */

/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}

/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}
static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}

/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct udevice *dev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
	bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		/* Allocate buffer back from the buffer manager */
		mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	}

	bm_pool->buf_num = 0;
}
/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct udevice *dev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(dev, priv, bm_pool);
	if (bm_pool->buf_num) {
		dev_err(dev, "cannot free all buffers in pool %d\n",
			bm_pool->id);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	return 0;
}

static int mvpp2_bm_pools_init(struct udevice *dev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask all BM interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
				      sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Set pool number in a BM cookie */
static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
{
	u32 bm;

	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);

	return bm;
}

/* Get pool number from a BM cookie */
static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}
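/*
 * Round-trip example for the cookie helpers above (illustrative):
 * mvpp2_bm_cookie_pool_get(mvpp2_bm_cookie_pool_set(cookie, 3))
 * returns 3 for any initial cookie value, since the setter clears the
 * 8-bit pool field at MVPP2_BM_COOKIE_POOL_OFFS before inserting the
 * new pool number.
 */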
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     unsigned long buf_phys_addr)
{
	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
}

/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
			      dma_addr_t dma_addr,
			      phys_addr_t phys_addr)
{
	int pool = mvpp2_bm_cookie_pool_get(bm);

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		mvpp2_bm_pool_put(port, bm_pool->id,
				  (dma_addr_t)buffer_loc.rx_buffer[i],
				  (unsigned long)buffer_loc.rx_buffer[i]);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	return i;
}
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(NULL, port->priv, new_pool);

		new_pool->pkt_size = pkt_size;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			netdev_err(port->dev, "pool %d: %d of %d allocated\n",
				   new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
					  MVPP2_BM_SWF_LONG,
					  port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	return 0;
}

/* Port configuration routines */

static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		val |= MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
		/* fall through */
	default:
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}

	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}

static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}
static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		    ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* PPv2.2 GoP/GMAC config */

/* Set the MAC to reset or exit from reset */
static int gop_gmac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (reset)
		val |= MVPP2_GMAC_PORT_RESET_MASK;
	else
		val &= ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

/*
 * gop_gpcs_mode_cfg
 *
 * Configure port to work with the Gig PCS or without it.
 */
static int gop_gpcs_mode_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_PCS_ENABLE_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	/* enable / disable PCS on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

static int gop_bypass_clk_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	else
		val &= ~MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	/* enable / disable the 125MHz clock bypass on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

static void gop_gmac_sgmii2_5_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII2_5_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val |= MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/*
	 * Configure GIG MAC to 1000Base-X mode connected to a fiber
	 * transceiver
	 */
	val |= MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN 0x9268 */
	val = MVPP2_GMAC_EN_PCS_AN |
	      MVPP2_GMAC_AN_BYPASS_EN |
	      MVPP2_GMAC_CONFIG_MII_SPEED |
	      MVPP2_GMAC_CONFIG_GMII_SPEED |
	      MVPP2_GMAC_FC_ADV_EN |
	      MVPP2_GMAC_CONFIG_FULL_DUPLEX |
	      MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
static void gop_gmac_sgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/* configure GIG MAC to SGMII mode */
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN */
	val = MVPP2_GMAC_EN_PCS_AN |
	      MVPP2_GMAC_AN_BYPASS_EN |
	      MVPP2_GMAC_AN_SPEED_EN |
	      MVPP2_GMAC_EN_FC_AN |
	      MVPP2_GMAC_AN_DUPLEX_EN |
	      MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void gop_gmac_rgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_RGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	val |= MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/* configure GIG MAC to SGMII mode */
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN 0xb8e8 */
	val = MVPP2_GMAC_AN_BYPASS_EN |
	      MVPP2_GMAC_AN_SPEED_EN |
	      MVPP2_GMAC_EN_FC_AN |
	      MVPP2_GMAC_AN_DUPLEX_EN |
	      MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

/* Set the internal muxes to the required MAC in the GOP */
static int gop_gmac_mode_cfg(struct mvpp2_port *port)
{
	u32 val;

	/* Set TX FIFO thresholds */
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		if (port->phy_speed == 2500)
			gop_gmac_sgmii2_5_cfg(port);
		else
			gop_gmac_sgmii_cfg(port);
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		gop_gmac_rgmii_cfg(port);
		break;

	default:
		return -1;
	}

	/* Jumbo frame support - 0x1400 * 2 = 0x2800 bytes */
	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= 0x1400 << MVPP2_GMAC_MAX_RX_SIZE_OFFS;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* PeriodicXonEn disable */
	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	val &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);

	return 0;
}
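/*
 * Worked example for the MAX_RX_SIZE fields used above (a sketch):
 * the GMAC field is programmed in 2-byte units, so writing 0x1400
 * allows frames up to 0x1400 * 2 = 0x2800 bytes (10240 decimal), and
 * mvpp2_gmac_max_rx_size_set() correspondingly writes
 * (pkt_size - MVPP2_MH_SIZE) / 2 for the runtime packet size.
 */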
static void gop_xlg_2_gig_mac_cfg(struct mvpp2_port *port)
{
	u32 val;

	/* relevant only for MAC0 (XLG0 and GMAC0) */
	if (port->gop_id > 0)
		return;

	/* configure 1Gig MAC mode */
	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
	val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
	writel(val, port->base + MVPP22_XLG_CTRL3_REG);
}

static int gop_gpcs_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (reset)
		val &= ~MVPP2_GMAC_SGMII_MODE_MASK;
	else
		val |= MVPP2_GMAC_SGMII_MODE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

/* Set the internal muxes to the required PCS in the PI */
static int gop_xpcs_mode(struct mvpp2_port *port, int num_of_lanes)
{
	u32 val;
	int lane;

	switch (num_of_lanes) {
	case 1:
		lane = 0;
		break;
	case 2:
		lane = 1;
		break;
	case 4:
		lane = 2;
		break;
	default:
		return -1;
	}

	/* configure XG MAC mode */
	val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
	val &= ~MVPP22_XPCS_PCSMODE_MASK;
	val &= ~MVPP22_XPCS_LANEACTIVE_MASK;
	val |= (2 * lane) << MVPP22_XPCS_LANEACTIVE_OFFS;
	writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);

	return 0;
}

static int gop_mpcs_mode(struct mvpp2_port *port)
{
	u32 val;

	/* configure PCS40G COMMON CONTROL */
	val = readl(port->priv->mpcs_base + PCS40G_COMMON_CONTROL);
	val &= ~FORWARD_ERROR_CORRECTION_MASK;
	writel(val, port->priv->mpcs_base + PCS40G_COMMON_CONTROL);

	/* configure PCS CLOCK RESET */
	val = readl(port->priv->mpcs_base + PCS_CLOCK_RESET);
	val &= ~CLK_DIVISION_RATIO_MASK;
	val |= 1 << CLK_DIVISION_RATIO_OFFS;
	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);

	val &= ~CLK_DIV_PHASE_SET_MASK;
	val |= MAC_CLK_RESET_MASK;
	val |= RX_SD_CLK_RESET_MASK;
	val |= TX_SD_CLK_RESET_MASK;
	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);

	return 0;
}
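/*
 * Lane encoding used by gop_xpcs_mode() above (derived from the
 * switch statement, shown here for reference): 1 lane -> code 0,
 * 2 lanes -> code 1, 4 lanes -> code 2, and the value written to the
 * LANEACTIVE field is 2 * code, i.e. 0, 2 or 4.
 */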
/* Set the internal muxes to the required MAC in the GOP */
static int gop_xlg_mac_mode_cfg(struct mvpp2_port *port, int num_of_act_lanes)
{
	u32 val;

	/* configure 10G MAC mode */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	val |= MVPP22_XLG_RX_FC_EN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
	val |= MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC;
	writel(val, port->base + MVPP22_XLG_CTRL3_REG);

	/* read - modify - write */
	val = readl(port->base + MVPP22_XLG_CTRL4_REG);
	val &= ~MVPP22_XLG_MODE_DMA_1G;
	val |= MVPP22_XLG_FORWARD_PFC_EN;
	val |= MVPP22_XLG_FORWARD_802_3X_FC_EN;
	val &= ~MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK;
	writel(val, port->base + MVPP22_XLG_CTRL4_REG);

	/* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */
	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_MAX_RX_SIZE_MASK;
	val |= 0x1400 << MVPP22_XLG_MAX_RX_SIZE_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);

	/* unmask link change interrupt */
	val = readl(port->base + MVPP22_XLG_INTERRUPT_MASK_REG);
	val |= MVPP22_XLG_INTERRUPT_LINK_CHANGE;
	val |= 1; /* unmask summary bit */
	writel(val, port->base + MVPP22_XLG_INTERRUPT_MASK_REG);

	return 0;
}

/* Set PCS to reset or exit from reset */
static int gop_xpcs_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
	if (reset)
		val &= ~MVPP22_XPCS_PCSRESET;
	else
		val |= MVPP22_XPCS_PCSRESET;
	writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);

	return 0;
}

/* Set the MAC to reset or exit from reset */
static int gop_xlg_mac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (reset)
		val &= ~MVPP22_XLG_MAC_RESETN;
	else
		val |= MVPP22_XLG_MAC_RESETN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	return 0;
}

/*
 * gop_port_init
 *
 * Init physical port. Configures the port mode and all its elements
 * accordingly.
 * Does not verify that the selected mode/port number is valid at the
 * core level.
 */
static int gop_port_init(struct mvpp2_port *port)
{
	int mac_num = port->gop_id;
	int num_of_act_lanes;

	if (mac_num >= MVPP22_GOP_MAC_NUM) {
		netdev_err(NULL, "%s: illegal port number %d", __func__,
			   mac_num);
		return -1;
	}

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		gop_gmac_reset(port, 1);

		/* configure PCS */
		gop_gpcs_mode_cfg(port, 0);
		gop_bypass_clk_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* pcs unreset */
		gop_gpcs_reset(port, 0);

		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_SGMII:
		/* configure PCS */
		gop_gpcs_mode_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* select proper Mac mode */
		gop_xlg_2_gig_mac_cfg(port);

		/* pcs unreset */
		gop_gpcs_reset(port, 0);
		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_SFI:
		num_of_act_lanes = 2;
		mac_num = 0;
		/* configure PCS */
		gop_xpcs_mode(port, num_of_act_lanes);
		gop_mpcs_mode(port);
		/* configure MAC */
		gop_xlg_mac_mode_cfg(port, num_of_act_lanes);

		/* pcs unreset */
		gop_xpcs_reset(port, 0);

		/* mac unreset */
		gop_xlg_mac_reset(port, 0);
		break;

	default:
		netdev_err(NULL, "%s: Requested port mode (%d) not supported\n",
			   __func__, port->phy_interface);
		return -1;
	}

	return 0;
}

static void gop_xlg_mac_port_enable(struct mvpp2_port *port, int enable)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (enable) {
		/* Enable port and MIB counters update */
		val |= MVPP22_XLG_PORT_EN;
		val &= ~MVPP22_XLG_MIBCNT_DIS;
	} else {
		/* Disable port */
		val &= ~MVPP22_XLG_PORT_EN;
	}
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);
}
static void gop_port_enable(struct mvpp2_port *port, int enable)
{
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		if (enable)
			mvpp2_port_enable(port);
		else
			mvpp2_port_disable(port);
		break;

	case PHY_INTERFACE_MODE_SFI:
		gop_xlg_mac_port_enable(port, enable);

		break;
	default:
		netdev_err(NULL, "%s: Wrong port mode (%d)\n", __func__,
			   port->phy_interface);
		return;
	}
}

/* RFU1 functions */
static inline u32 gop_rfu1_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->rfu1_base + offset);
}

static inline void gop_rfu1_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->rfu1_base + offset);
}

static u32 mvpp2_netc_cfg_create(int gop_id, phy_interface_t phy_type)
{
	u32 val = 0;

	if (gop_id == 2) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII)
			val |= MV_NETC_GE_MAC2_SGMII;
	}

	if (gop_id == 3) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII)
			val |= MV_NETC_GE_MAC3_SGMII;
		else if (phy_type == PHY_INTERFACE_MODE_RGMII ||
			 phy_type == PHY_INTERFACE_MODE_RGMII_ID)
			val |= MV_NETC_GE_MAC3_RGMII;
	}

	return val;
}

static void gop_netc_active_port(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORTS_ACTIVE_MASK(gop_id));

	val <<= NETC_PORTS_ACTIVE_OFFSET(gop_id);
	val &= NETC_PORTS_ACTIVE_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}

static void gop_netc_mii_mode(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
	reg &= ~NETC_GBE_PORT1_MII_MODE_MASK;

	val <<= NETC_GBE_PORT1_MII_MODE_OFFS;
	val &= NETC_GBE_PORT1_MII_MODE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}

static void gop_netc_gop_reset(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, GOP_SOFT_RESET_1_REG);
	reg &= ~NETC_GOP_SOFT_RESET_MASK;

	val <<= NETC_GOP_SOFT_RESET_OFFS;
	val &= NETC_GOP_SOFT_RESET_MASK;

	reg |= val;

	gop_rfu1_write(priv, GOP_SOFT_RESET_1_REG, reg);
}

static void gop_netc_gop_clock_logic_set(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_CLK_DIV_PHASE_MASK;

	val <<= NETC_CLK_DIV_PHASE_OFFS;
	val &= NETC_CLK_DIV_PHASE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_port_rf_reset(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(gop_id));

	val <<= NETC_PORT_GIG_RF_RESET_OFFS(gop_id);
	val &= NETC_PORT_GIG_RF_RESET_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}
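/*
 * All the RFU1 accessors above follow the same read-modify-write
 * pattern: clear the field with its MASK, shift the new value to
 * OFFS, trim it with the MASK and OR it back in. For example (an
 * illustration), gop_netc_active_port(priv, 2, 1) only toggles the
 * "port 2 active" field of NETCOMP_PORTS_CONTROL_1_REG and leaves
 * the other bits untouched.
 */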
	val &= mask;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}

static void gop_netc_bus_width_select(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_BUS_WIDTH_SELECT_MASK;

	val <<= NETC_BUS_WIDTH_SELECT_OFFS;
	val &= NETC_BUS_WIDTH_SELECT_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_sample_stages_timing(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK;

	val <<= NETC_GIG_RX_DATA_SAMPLE_OFFS;
	val &= NETC_GIG_RX_DATA_SAMPLE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_mac_to_xgmii(struct mvpp2 *priv, int gop_id,
				  enum mv_netc_phase phase)
{
	switch (phase) {
	case MV_NETC_FIRST_PHASE:
		/* Set Bus Width to HB mode = 1 */
		gop_netc_bus_width_select(priv, 1);
		/* Select RGMII mode */
		gop_netc_gbe_sgmii_mode_select(priv, gop_id, MV_NETC_GBE_XMII);
		break;

	case MV_NETC_SECOND_PHASE:
		/* De-assert the relevant port HB reset */
		gop_netc_port_rf_reset(priv, gop_id, 1);
		break;
	}
}

static void gop_netc_mac_to_sgmii(struct mvpp2 *priv, int gop_id,
				  enum mv_netc_phase phase)
{
	switch (phase) {
	case MV_NETC_FIRST_PHASE:
		/* Set Bus Width to HB mode = 1 */
		gop_netc_bus_width_select(priv, 1);
		/* Select SGMII mode */
		if (gop_id >= 1) {
			gop_netc_gbe_sgmii_mode_select(priv, gop_id,
						       MV_NETC_GBE_SGMII);
		}

		/* Configure the sample stages */
		gop_netc_sample_stages_timing(priv, 0);
		/* Configure the ComPhy Selector */
		/* gop_netc_com_phy_selector_config(netComplex); */
		break;

	case MV_NETC_SECOND_PHASE:
		/* De-assert the relevant port HB reset */
		gop_netc_port_rf_reset(priv, gop_id, 1);
		break;
	}
}

static int gop_netc_init(struct mvpp2 *priv, enum mv_netc_phase phase)
{
	u32 c = priv->netc_config;

	if (c & MV_NETC_GE_MAC2_SGMII)
		gop_netc_mac_to_sgmii(priv, 2, phase);
	else
		gop_netc_mac_to_xgmii(priv, 2, phase);

	if (c & MV_NETC_GE_MAC3_SGMII) {
		gop_netc_mac_to_sgmii(priv, 3, phase);
	} else {
		gop_netc_mac_to_xgmii(priv, 3, phase);
		if (c & MV_NETC_GE_MAC3_RGMII)
			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_RGMII);
		else
			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_MII);
	}

	/* Activate gop ports 0, 2, 3 */
	gop_netc_active_port(priv, 0, 1);
	gop_netc_active_port(priv, 2, 1);
	gop_netc_active_port(priv, 3, 1);

	if (phase == MV_NETC_SECOND_PHASE) {
		/* Enable the GOP internal clock logic */
		gop_netc_gop_clock_logic_set(priv, 1);
		/* De-assert GOP unit reset */
		gop_netc_gop_reset(priv, 1);
	}

	return 0;
}
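
/*
 * The NetComplex bring-up above is deliberately two-phase:
 * MV_NETC_FIRST_PHASE selects bus width and the per-MAC interface
 * (xMII vs. SGMII), while MV_NETC_SECOND_PHASE de-asserts the port
 * resets and enables the GOP clock logic. Callers are expected to run
 * both phases back to back once priv->netc_config is known, as the
 * probe path at the end of this file does:
 *
 *	gop_netc_init(priv, MV_NETC_FIRST_PHASE);
 *	gop_netc_init(priv, MV_NETC_SECOND_PHASE);
 */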

/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Configure port to loopback if needed */
		if (port->flags & MVPP2_F_LOOPBACK)
			mvpp2_port_loopback_set(port);

		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
			MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable transmit via physical egress queue
 * - HW starts to take descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs */
	qmap = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}
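
/*
 * Note on the two counters involved here: MVPP2_RXQ_STATUS_REG holds
 * the occupied (received, not yet processed) descriptor count, while
 * the status-update register used below packs "processed" into bits
 * 15:0 and "newly freed" into bits 31:16 of a single write. After
 * fully handling n packets the usual call is therefore
 * mvpp2_rxq_status_update(port, rxq->id, n, n), as the RX paths in
 * this file do.
 */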

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	/* Offset is in units of 32 bytes */
	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Obtain BM cookie information from descriptor */
static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
				 struct mvpp2_rx_desc *rx_desc)
{
	int cpu = smp_processor_id();
	int pool;

	pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
		MVPP2_RXD_BM_POOL_ID_MASK) >>
		MVPP2_RXD_BM_POOL_ID_OFFS;

	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
}

/* Tx descriptors helper methods */

/* Get number of Tx descriptors waiting to be transmitted by HW */
static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
				       struct mvpp2_tx_queue *txq)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);

	return val & MVPP2_TXQ_PENDING_MASK;
}

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
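
/*
 * Reminder: the per-TXQ registers used above and below are windowed.
 * Software first selects a queue via MVPP2_TXQ_NUM_REG and subsequent
 * TXQ register accesses then apply to that queue, so reading the
 * pending count is always the two-step sequence from
 * mvpp2_txq_pend_desc_num_get():
 *
 *	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
 *	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
 */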

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
	}
}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}

/* Free Tx queue buffers */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++)
		mvpp2_txq_inc_get(txq_pcpu);
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}

/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct udevice *dev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = buffer_loc.aggr_tx_descs;
	aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
	if (!aggr_txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(aggr_txq->descs !=
	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address - indirect access */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

	return 0;
}

/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	u32 rxq_dma;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = buffer_loc.rx_descs;
	rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
	if (!rxq->descs)
		return -ENOMEM;

	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}

/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 bm = mvpp2_bm_cookie_build(port, rx_desc);

		mvpp2_pool_refill(port, bm,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_drop_pkts(port, rxq);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
}
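
/*
 * Note on mvpp2_rxq_init() above: on PPv2.1 the full DMA address of
 * the descriptor ring is written to MVPP2_RXQ_DESC_ADDR_REG, while on
 * PPv2.2 it is written shifted right by MVPP22_DESC_ADDR_OFFS, since
 * the register there holds the upper bits of a wider address. The
 * final mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size) is what
 * actually hands the whole ring to the hardware as free descriptors.
 */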

/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = buffer_loc.tx_descs;
	txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
	if (!txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
		    MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
		    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
	}

	return 0;
}

/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
}
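
/*
 * Worked example for the prefetch-buffer arithmetic in mvpp2_txq_init()
 * above: with desc_per_txq = 16, port 1 / logical queue 2 gets
 *
 *	desc = 1 * MVPP2_MAX_TXQ * 16 + 2 * 16
 *
 * so each (port, queue) pair owns a disjoint 16-descriptor slice of
 * the shared prefetch buffer, programmed via MVPP2_PREF_BUF_PTR(desc).
 */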

/* Cleanup Tx queue */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The queue has been stopped, so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}

/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	mvpp2_txq_sent_counter_clear(port);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}

/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < rxq_number; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	mvpp2_txq_sent_counter_clear(port);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}

/* Adjust link */
static void mvpp2_link_event(struct mvpp2_port *port)
{
	struct phy_device *phydev = port->phy_dev;
	int status_change = 0;
	u32 val;

	if (phydev->link) {
		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			u32 val;

			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
				 MVPP2_GMAC_CONFIG_GMII_SPEED |
				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
				 MVPP2_GMAC_AN_SPEED_EN |
				 MVPP2_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

			port->duplex = phydev->duplex;
			port->speed = phydev->speed;
		}
	}

	if (phydev->link != port->link) {
		if (!phydev->link) {
			port->duplex = -1;
			port->speed = 0;
		}

		port->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
				MVPP2_GMAC_FORCE_LINK_DOWN);
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
		} else {
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);
		}
	}
}

/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
			   status, sz);
		break;
	}
}

/* Return the buffer to the BM pool so it can be reused for reception */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool,
			   u32 bm, dma_addr_t dma_addr)
{
	mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
	return 0;
}

/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		mvpp2_gmac_max_rx_size_set(port);
	default:
		break;
	}

	mvpp2_txp_max_tx_size_set(port);

	if (port->priv->hw_version == MVPP21)
		mvpp2_port_enable(port);
	else
		gop_port_enable(port, 1);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mvpp2_egress_disable(port);

	if (port->priv->hw_version == MVPP21)
		mvpp2_port_disable(port);
	else
		gop_port_enable(port, 0);
}

static int mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	if (!port->init || port->link == 0) {
		phy_dev = phy_connect(port->priv->bus, port->phyaddr, dev,
				      port->phy_interface);
		port->phy_dev = phy_dev;
		if (!phy_dev) {
			netdev_err(port->dev, "cannot connect to phy\n");
			return -ENODEV;
		}
		phy_dev->supported &= PHY_GBIT_FEATURES;
		phy_dev->advertising = phy_dev->supported;

		port->phy_dev = phy_dev;
		port->link = 0;
		port->duplex = 0;
		port->speed = 0;

		phy_config(phy_dev);
		phy_startup(phy_dev);
		if (!phy_dev->link) {
			printf("%s: No link\n", phy_dev->dev->name);
			return -1;
		}

		port->init = 1;
	} else {
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	return 0;
}

static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
{
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      port->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept own MAC failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		return err;
	}

	if (port->phy_node) {
		err = mvpp2_phy_connect(dev, port);
		if (err < 0)
			return err;

		mvpp2_link_event(port);
	} else {
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	mvpp2_start_dev(port);

	return 0;
}

/* No Device ops here in U-Boot */

/* Driver initialization */

static void mvpp2_port_power_up(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;

	/* On PPv2.2 the GoP / interface configuration has already been done */
	if (priv->hw_version == MVPP21)
		mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);
	if (priv->hw_version == MVPP21)
		mvpp2_port_fc_adv_enable(port);
	mvpp2_port_reset(port);
}
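
/*
 * Ordering note for mvpp2_open() above: the parser entries (broadcast
 * DA, own MAC DA, default flow) are installed before the Rx/Tx queues
 * are brought up and the port is started, so the port never runs with
 * an unconfigured classifier path.
 */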

/* Initialize port HW */
static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	if (port->first_rxq + rxq_number >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	if (priv->hw_version == MVPP21)
		mvpp2_port_disable(port);
	else
		gop_port_enable(port, 0);

	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < txq_number; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq)
			return -ENOMEM;

		txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
					 GFP_KERNEL);
		if (!txq->pcpu)
			return -ENOMEM;

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs)
		return -ENOMEM;

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq)
			return -ENOMEM;
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	/* Configure Rx queue group interrupt for this port */
	if (priv->hw_version == MVPP21) {
		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
			    CONFIG_MV_ETH_RXQ);
	} else {
		u32 val;

		val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

		val = (CONFIG_MV_ETH_RXQ <<
		       MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
	}

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		return err;

	return 0;
}

static int phy_info_parse(struct udevice *dev, struct mvpp2_port *port)
{
	int port_node = dev_of_offset(dev);
	const char *phy_mode_str;
	int phy_node, mdio_off, cp_node;
	u32 id;
	int phyaddr = 0;
	int phy_mode = -1;
	u64 mdio_addr;

	phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");

	if (phy_node > 0) {
		phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0);
		if (phyaddr < 0) {
			dev_err(&pdev->dev, "could not find phy address\n");
			return -1;
		}
		mdio_off = fdt_parent_offset(gd->fdt_blob, phy_node);

		/* TODO: This is a workaround for the missing mdio driver.
		 * U-Boot 2017 has no mdio driver, and on the MACCHIATOBin
		 * board the ports of CP1 are connected to the mdio bus of
		 * CP0. The workaround is to derive the mdio base address
		 * from the parent of the phy handle. It should be removed
		 * once an mdio driver is implemented.
		 */
4754 */ 4755 mdio_addr = fdtdec_get_uint(gd->fdt_blob, 4756 mdio_off, "reg", 0); 4757 4758 cp_node = fdt_parent_offset(gd->fdt_blob, mdio_off); 4759 mdio_addr |= fdt_get_base_address((void *)gd->fdt_blob, 4760 cp_node); 4761 4762 port->priv->mdio_base = (void *)mdio_addr; 4763 4764 if (port->priv->mdio_base < 0) { 4765 dev_err(&pdev->dev, "could not find mdio base address\n"); 4766 return -1; 4767 } 4768 } else { 4769 phy_node = 0; 4770 } 4771 4772 phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL); 4773 if (phy_mode_str) 4774 phy_mode = phy_get_interface_by_name(phy_mode_str); 4775 if (phy_mode == -1) { 4776 dev_err(&pdev->dev, "incorrect phy mode\n"); 4777 return -EINVAL; 4778 } 4779 4780 id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1); 4781 if (id == -1) { 4782 dev_err(&pdev->dev, "missing port-id value\n"); 4783 return -EINVAL; 4784 } 4785 4786 #ifdef CONFIG_DM_GPIO 4787 gpio_request_by_name(dev, "phy-reset-gpios", 0, 4788 &port->phy_reset_gpio, GPIOD_IS_OUT); 4789 gpio_request_by_name(dev, "marvell,sfp-tx-disable-gpio", 0, 4790 &port->phy_tx_disable_gpio, GPIOD_IS_OUT); 4791 #endif 4792 4793 /* 4794 * ToDo: 4795 * Not sure if this DT property "phy-speed" will get accepted, so 4796 * this might change later 4797 */ 4798 /* Get phy-speed for SGMII 2.5Gbps vs 1Gbps setup */ 4799 port->phy_speed = fdtdec_get_int(gd->fdt_blob, port_node, 4800 "phy-speed", 1000); 4801 4802 port->id = id; 4803 if (port->priv->hw_version == MVPP21) 4804 port->first_rxq = port->id * rxq_number; 4805 else 4806 port->first_rxq = port->id * port->priv->max_port_rxqs; 4807 port->phy_node = phy_node; 4808 port->phy_interface = phy_mode; 4809 port->phyaddr = phyaddr; 4810 4811 return 0; 4812 } 4813 4814 #ifdef CONFIG_DM_GPIO 4815 /* Port GPIO initialization */ 4816 static void mvpp2_gpio_init(struct mvpp2_port *port) 4817 { 4818 if (dm_gpio_is_valid(&port->phy_reset_gpio)) { 4819 dm_gpio_set_value(&port->phy_reset_gpio, 0); 4820 udelay(1000); 4821 dm_gpio_set_value(&port->phy_reset_gpio, 1); 4822 } 4823 4824 if (dm_gpio_is_valid(&port->phy_tx_disable_gpio)) 4825 dm_gpio_set_value(&port->phy_tx_disable_gpio, 0); 4826 } 4827 #endif 4828 4829 /* Ports initialization */ 4830 static int mvpp2_port_probe(struct udevice *dev, 4831 struct mvpp2_port *port, 4832 int port_node, 4833 struct mvpp2 *priv) 4834 { 4835 int err; 4836 4837 port->tx_ring_size = MVPP2_MAX_TXD; 4838 port->rx_ring_size = MVPP2_MAX_RXD; 4839 4840 err = mvpp2_port_init(dev, port); 4841 if (err < 0) { 4842 dev_err(&pdev->dev, "failed to init port %d\n", port->id); 4843 return err; 4844 } 4845 mvpp2_port_power_up(port); 4846 4847 #ifdef CONFIG_DM_GPIO 4848 mvpp2_gpio_init(port); 4849 #endif 4850 4851 priv->port_list[port->id] = port; 4852 priv->num_ports++; 4853 return 0; 4854 } 4855 4856 /* Initialize decoding windows */ 4857 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, 4858 struct mvpp2 *priv) 4859 { 4860 u32 win_enable; 4861 int i; 4862 4863 for (i = 0; i < 6; i++) { 4864 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); 4865 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); 4866 4867 if (i < 4) 4868 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); 4869 } 4870 4871 win_enable = 0; 4872 4873 for (i = 0; i < dram->num_cs; i++) { 4874 const struct mbus_dram_window *cs = dram->cs + i; 4875 4876 mvpp2_write(priv, MVPP2_WIN_BASE(i), 4877 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | 4878 dram->mbus_dram_target_id); 4879 4880 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 4881 (cs->size - 1) & 0xffff0000); 4882 4883 win_enable |= 

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}

/* Initialize Rx FIFOs */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		if (priv->hw_version == MVPP22) {
			if (port == 0) {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE);
			} else if (port == 1) {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE);
			} else {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE);
			}
		} else {
			mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
				    MVPP21_RX_FIFO_PORT_DATA_SIZE);
			mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
				    MVPP21_RX_FIFO_PORT_ATTR_SIZE);
		}
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}

/* Initialize Tx FIFOs */
static void mvpp2_tx_fifo_init(struct mvpp2 *priv)
{
	int port, val;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		/* Port 0 supports 10KB TX FIFO */
		if (port == 0) {
			val = MVPP2_TX_FIFO_DATA_SIZE_10KB &
				MVPP22_TX_FIFO_SIZE_MASK;
		} else {
			val = MVPP2_TX_FIFO_DATA_SIZE_3KB &
				MVPP22_TX_FIFO_SIZE_MASK;
		}
		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), val);
	}
}

static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}

/* Initialize network controller common part HW */
static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Checks for hardware constraints (U-Boot uses only one rxq) */
	if ((rxq_number > priv->max_port_rxqs) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(&pdev->dev, "invalid queue size parameter\n");
		return -EINVAL;
	}

	/* MBUS windows configuration */
	dram_target_info = mvebu_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	if (priv->hw_version == MVPP21) {
		/* Disable HW PHY polling */
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		/* Enable HW PHY polling */
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val |= MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	/* Tx Fifo Init */
	if (priv->hw_version == MVPP22)
		mvpp2_tx_fifo_init(priv);

	/* Reset Rx queue group interrupt configuration */
	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
		if (priv->hw_version == MVPP21) {
			mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i),
				    CONFIG_MV_ETH_RXQ);
			continue;
		} else {
			u32 val;

			val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET);
			mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

			val = (CONFIG_MV_ETH_RXQ <<
			       MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET);
			mvpp2_write(priv,
				    MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
		}
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(dev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(dev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
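
/*
 * Everything above this point is one-time controller setup, run from
 * the first port's probe. The SMI/MDIO helpers below are also used at
 * runtime by the PHY library through the mii_dev callbacks registered
 * in mvpp2_base_probe(); an illustrative consumer would be:
 *
 *	int bmsr = bus->read(bus, port->phyaddr, MDIO_DEVAD_NONE, MII_BMSR);
 */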

/* SMI / MDIO functions */

static int smi_wait_ready(struct mvpp2 *priv)
{
	u32 timeout = MVPP2_SMI_TIMEOUT;
	u32 smi_reg;

	/* wait till the SMI is not busy */
	do {
		/* read smi register */
		smi_reg = readl(priv->mdio_base);
		if (timeout-- == 0) {
			printf("Error: SMI busy timeout\n");
			return -EFAULT;
		}
	} while (smi_reg & MVPP2_SMI_BUSY);

	return 0;
}

/*
 * mpp2_mdio_read - miiphy_read callback function.
 *
 * Returns the 16-bit phy register value, or a negative error code
 * (-EFAULT) on error
 */
static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct mvpp2 *priv = bus->priv;
	u32 smi_reg;
	u32 timeout;

	/* check parameters */
	if (addr > MVPP2_PHY_ADDR_MASK) {
		printf("Error: Invalid PHY address %d\n", addr);
		return -EFAULT;
	}

	if (reg > MVPP2_PHY_REG_MASK) {
		printf("Err: Invalid register offset %d\n", reg);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(priv) < 0)
		return -EFAULT;

	/* fill the phy address and register offset and read opcode */
	smi_reg = (addr << MVPP2_SMI_DEV_ADDR_OFFS)
		| (reg << MVPP2_SMI_REG_ADDR_OFFS)
		| MVPP2_SMI_OPCODE_READ;

	/* write the smi register */
	writel(smi_reg, priv->mdio_base);

	/* wait till read value is ready */
	timeout = MVPP2_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = readl(priv->mdio_base);
		if (timeout-- == 0) {
			printf("Err: SMI read ready timeout\n");
			return -EFAULT;
		}
	} while (!(smi_reg & MVPP2_SMI_READ_VALID));

	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < MVPP2_SMI_TIMEOUT; timeout++)
		;

	return readl(priv->mdio_base) & MVPP2_SMI_DATA_MASK;
}

/*
 * mpp2_mdio_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeded, or -EFAULT on bad parameters
 * or timeout
 */
static int mpp2_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			   u16 value)
{
	struct mvpp2 *priv = bus->priv;
	u32 smi_reg;

	/* check parameters */
	if (addr > MVPP2_PHY_ADDR_MASK) {
		printf("Error: Invalid PHY address %d\n", addr);
		return -EFAULT;
	}

	if (reg > MVPP2_PHY_REG_MASK) {
		printf("Err: Invalid register offset %d\n", reg);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(priv) < 0)
		return -EFAULT;

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = value << MVPP2_SMI_DATA_OFFS;
	smi_reg |= (addr << MVPP2_SMI_DEV_ADDR_OFFS)
		| (reg << MVPP2_SMI_REG_ADDR_OFFS);
	smi_reg &= ~MVPP2_SMI_OPCODE_READ;

	/* write the smi register */
	writel(smi_reg, priv->mdio_base);

	return 0;
}
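
/*
 * Note on the write path above: the SMI opcode field is effectively a
 * single bit, so clearing MVPP2_SMI_OPCODE_READ in the composed frame
 * selects a write. PHY address, register offset and data all share the
 * one 32-bit SMI management register at priv->mdio_base.
 */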

static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_rx_desc *rx_desc;
	struct mvpp2_bm_pool *bm_pool;
	dma_addr_t dma_addr;
	u32 bm, rx_status;
	int pool, rx_bytes, err;
	int rx_received;
	struct mvpp2_rx_queue *rxq;
	u32 cause_rx_tx, cause_rx, cause_misc;
	u8 *data;

	cause_rx_tx = mvpp2_read(port->priv,
				 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
	cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
	if (!cause_rx_tx && !cause_misc)
		return 0;

	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;

	/* Process RX packets */
	cause_rx |= port->pending_cause_rx;
	rxq = mvpp2_get_rx_queue(port, cause_rx);

	/* Get number of received packets */
	rx_received = mvpp2_rxq_received(port, rxq->id);

	/* Return if no packets are received */
	if (!rx_received)
		return 0;

	rx_desc = mvpp2_rxq_next_desc_get(rxq);
	rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
	rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
	rx_bytes -= MVPP2_MH_SIZE;
	dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);

	bm = mvpp2_bm_cookie_build(port, rx_desc);
	pool = mvpp2_bm_cookie_pool_get(bm);
	bm_pool = &port->priv->bm_pools[pool];

	/* In case of an error, release the requested buffer pointer
	 * to the Buffer Manager. This request process is controlled
	 * by the hardware, and the information about the buffer is
	 * carried in the RX descriptor.
	 */
	if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
		mvpp2_rx_error(port, rx_desc);
		/* Return the buffer to the pool */
		mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
		return 0;
	}

	err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
	if (err) {
		netdev_err(port->dev, "failed to refill BM pools\n");
		return 0;
	}

	/* Update Rx queue management counters */
	mb();
	mvpp2_rxq_status_update(port, rxq->id, 1, 1);

	/* Hand the packet to the stack, skipping the 2-byte Marvell
	 * header and the 32 bytes reserved for cache prefetch
	 */
	data = (u8 *)dma_addr + 2 + 32;

	if (rx_bytes <= 0)
		return 0;

	/*
	 * No cache invalidation needed here, since the rx_buffers are
	 * located in an uncached memory region
	 */
	*packetp = data;

	return rx_bytes;
}

/* Drain Txq */
static void mvpp2_txq_drain(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			    int enable)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
	if (enable)
		val |= MVPP2_TXQ_DRAIN_EN_MASK;
	else
		val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
}

static int mvpp2_send(struct udevice *dev, void *packet, int length)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_tx_desc *tx_desc;
	int tx_done;
	int timeout;

	txq = port->txqs[0];
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, length);
	mvpp2_txdesc_offset_set(port, tx_desc,
				(dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);
	/* First and Last descriptor */
	mvpp2_txdesc_cmd_set(port, tx_desc,
			     MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
			     | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);

	/* Flush tx data */
	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + ALIGN(length, PKTALIGN));

	/* Enable transmit */
	mb();
	mvpp2_aggr_txq_pend_desc_add(port, 1);

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);

	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent from aggregated to phys TXQ\n");
			return 0;
		}
		tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (tx_done);

	/* Enable TXQ drain */
	mvpp2_txq_drain(port, txq, 1);

	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent\n");
			return 0;
		}
		tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	} while (!tx_done);

	/* Disable TXQ drain */
	mvpp2_txq_drain(port, txq, 0);

	return 0;
}

static int mvpp2_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvpp2_port *port = dev_get_priv(dev);

	/* Load current MAC address */
	memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);

	/* Reconfigure parser to accept the original MAC address */
	mvpp2_prs_update_mac_da(port, port->dev_addr);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		mvpp2_port_power_up(port);
	default:
		break;
	}

	mvpp2_open(dev, port);

	return 0;
}

static void mvpp2_stop(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);

	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);
}

static int mvpp22_smi_phy_addr_cfg(struct mvpp2_port *port)
{
	writel(port->phyaddr, port->priv->iface_base +
	       MVPP22_SMI_PHY_ADDR_REG(port->gop_id));

	return 0;
}

static int mvpp2_base_probe(struct udevice *dev)
{
	struct mvpp2 *priv = dev_get_priv(dev);
	struct mii_dev *bus;
	void *bd_space;
	u32 size = 0;
	int i;

	/* Save hw-version */
	priv->hw_version = dev_get_driver_data(dev);

	/*
	 * U-Boot special buffer handling:
	 *
	 * Allocate the buffer area for descs and rx_buffers. This is only
	 * done once for all interfaces, as only one interface can be
	 * active at a time. Make this area DMA-safe by disabling the
	 * D-cache.
	 */

	/* Align buffer area for descs and rx_buffers to 1MiB */
	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	mmu_set_region_dcache_behaviour((unsigned long)bd_space,
					BD_SPACE, DCACHE_OFF);

	buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
	size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;

	buffer_loc.tx_descs =
		(struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
	size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;

	buffer_loc.rx_descs =
		(struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
	size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		buffer_loc.bm_pool[i] =
			(unsigned long *)((unsigned long)bd_space + size);
		if (priv->hw_version == MVPP21)
			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32);
		else
			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64);
	}

	for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
		buffer_loc.rx_buffer[i] =
			(unsigned long *)((unsigned long)bd_space + size);
		size += RX_BUFFER_SIZE;
	}

	/* Clear the complete area so that all descriptors are cleared */
	memset(bd_space, 0, size);

	/* Save base addresses for later use */
	priv->base = (void *)devfdt_get_addr_index(dev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	if (priv->hw_version == MVPP21) {
		priv->lms_base = (void *)devfdt_get_addr_index(dev, 1);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);

		priv->mdio_base = priv->lms_base + MVPP21_SMI;
	} else {
		priv->iface_base = (void *)devfdt_get_addr_index(dev, 1);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);

		priv->mdio_base = priv->iface_base + MVPP22_SMI;

		/* Store common base addresses for all ports */
		priv->mpcs_base = priv->iface_base + MVPP22_MPCS;
		priv->xpcs_base = priv->iface_base + MVPP22_XPCS;
		priv->rfu1_base = priv->iface_base + MVPP22_RFU1;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	/* Finally create and register the MDIO bus driver */
	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = mpp2_mdio_read;
	bus->write = mpp2_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = (void *)priv;
	priv->bus = bus;

	return mdio_register(bus);
}

static int mvpp2_probe(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = dev_get_priv(dev->parent);
	int err;

	/* Only call the probe function for the parent once */
	if (!priv->probe_done) {
		err = mvpp2_base_probe(dev->parent);
		if (err)
			return err;
	}

	port->priv = dev_get_priv(dev->parent);

	err = phy_info_parse(dev, port);
	if (err)
		return err;

	/*
	 * We need the port specific io base addresses at this stage, since
	 * gop_port_init() accesses these registers
	 */
	if (priv->hw_version == MVPP21) {
		int priv_common_regs_num = 2;

		port->base = (void __iomem *)devfdt_get_addr_index(
			dev->parent, priv_common_regs_num + port->id);
		if (IS_ERR(port->base))
			return PTR_ERR(port->base);
	} else {
		port->gop_id = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
					      "gop-port-id", -1);
		if (port->gop_id == -1) {
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			return -EINVAL;
		}

		port->base = priv->iface_base + MVPP22_PORT_BASE +
			port->gop_id * MVPP22_PORT_OFFSET;

		/* Set phy address of the port */
		if (port->phy_node)
			mvpp22_smi_phy_addr_cfg(port);

		/* GoP Init */
		gop_port_init(port);
	}

	if (!priv->probe_done) {
		/* Initialize network controller */
		err = mvpp2_init(dev, priv);
		if (err < 0) {
			dev_err(&pdev->dev, "failed to initialize controller\n");
			return err;
		}
		priv->num_ports = 0;
		priv->probe_done = 1;
	}

	err = mvpp2_port_probe(dev, port, dev_of_offset(dev), priv);
	if (err)
		return err;

	if (priv->hw_version == MVPP22) {
		priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id,
							   port->phy_interface);

		/* Netcomplex configurations for all ports */
		gop_netc_init(priv, MV_NETC_FIRST_PHASE);
		gop_netc_init(priv, MV_NETC_SECOND_PHASE);
	}

	return 0;
}

/*
 * Empty BM pool and stop its activity before the OS is started
 */
static int mvpp2_remove(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = port->priv;
	int i;

	priv->num_ports--;

	if (priv->num_ports)
		return 0;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);

	return 0;
}

static const struct eth_ops mvpp2_ops = {
	.start		= mvpp2_start,
	.send		= mvpp2_send,
	.recv		= mvpp2_recv,
	.stop		= mvpp2_stop,
};

static struct driver mvpp2_driver = {
	.name	= "mvpp2",
	.id	= UCLASS_ETH,
	.probe	= mvpp2_probe,
	.remove	= mvpp2_remove,
	.ops	= &mvpp2_ops,
	.priv_auto_alloc_size = sizeof(struct mvpp2_port),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags	= DM_FLAG_ACTIVE_DMA,
};

/*
 * Use a MISC device to bind the n instances (child nodes) of the
 * network base controller in UCLASS_ETH.
 */
static int mvpp2_base_bind(struct udevice *parent)
{
	const void *blob = gd->fdt_blob;
	int node = dev_of_offset(parent);
	struct uclass_driver *drv;
	struct udevice *dev;
	struct eth_pdata *plat;
	char *name;
	int subnode;
	u32 id;
	int base_id_add;

	/* Lookup eth driver */
	drv = lists_uclass_lookup(UCLASS_ETH);
	if (!drv) {
		puts("Cannot find eth driver\n");
		return -ENOENT;
	}

	base_id_add = base_id;

	fdt_for_each_subnode(subnode, blob, node) {
		/* Increment base_id for all subnodes, also the disabled ones */
		base_id++;

		/* Skip disabled ports */
		if (!fdtdec_get_is_enabled(blob, subnode))
			continue;

		plat = calloc(1, sizeof(*plat));
		if (!plat)
			return -ENOMEM;

		id = fdtdec_get_int(blob, subnode, "port-id", -1);
		id += base_id_add;

		name = calloc(1, 16);
		sprintf(name, "mvpp2-%d", id);

		/* Create child device UCLASS_ETH and bind it */
		device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev);
		dev_set_of_offset(dev, subnode);
	}

	return 0;
}

static const struct udevice_id mvpp2_ids[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = MVPP22,
	},
	{ }
};

U_BOOT_DRIVER(mvpp2_base) = {
	.name	= "mvpp2_base",
	.id	= UCLASS_MISC,
	.of_match = mvpp2_ids,
	.bind	= mvpp2_base_bind,
	.priv_auto_alloc_size = sizeof(struct mvpp2),
};
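
/*
 * For reference, the bind/probe flow above assumes a device tree
 * fragment along these lines (an illustrative sketch only; node names,
 * unit addresses and PHY references are board specific):
 *
 *	ethernet@0 {
 *		compatible = "marvell,armada-7k-pp22";
 *
 *		eth0 {
 *			port-id = <0>;
 *			gop-port-id = <0>;
 *			phy-mode = "sfi";
 *		};
 *		eth1 {
 *			port-id = <2>;
 *			gop-port-id = <2>;
 *			phy = <&cp0_phy0>;
 *			phy-mode = "sgmii";
 *		};
 *	};
 *
 * mvpp2_base_bind() walks these subnodes and binds one UCLASS_ETH
 * device per enabled port; phy_info_parse() then consumes port-id,
 * phy and phy-mode, and mvpp2_probe() reads gop-port-id.
 */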