1 /* 2 * Driver for Marvell PPv2 network controller for Armada 375 SoC. 3 * 4 * Copyright (C) 2014 Marvell 5 * 6 * Marcin Wojtas <mw@semihalf.com> 7 * 8 * U-Boot version: 9 * Copyright (C) 2016-2017 Stefan Roese <sr@denx.de> 10 * 11 * This file is licensed under the terms of the GNU General Public 12 * License version 2. This program is licensed "as is" without any 13 * warranty of any kind, whether express or implied. 14 */ 15 16 #include <common.h> 17 #include <dm.h> 18 #include <dm/device-internal.h> 19 #include <dm/lists.h> 20 #include <net.h> 21 #include <netdev.h> 22 #include <config.h> 23 #include <malloc.h> 24 #include <asm/io.h> 25 #include <linux/errno.h> 26 #include <phy.h> 27 #include <miiphy.h> 28 #include <watchdog.h> 29 #include <asm/arch/cpu.h> 30 #include <asm/arch/soc.h> 31 #include <linux/compat.h> 32 #include <linux/mbus.h> 33 #include <asm-generic/gpio.h> 34 #include <fdt_support.h> 35 36 DECLARE_GLOBAL_DATA_PTR; 37 38 /* Some linux -> U-Boot compatibility stuff */ 39 #define netdev_err(dev, fmt, args...) \ 40 printf(fmt, ##args) 41 #define netdev_warn(dev, fmt, args...) \ 42 printf(fmt, ##args) 43 #define netdev_info(dev, fmt, args...) \ 44 printf(fmt, ##args) 45 #define netdev_dbg(dev, fmt, args...) 
\ 46 printf(fmt, ##args) 47 48 #define ETH_ALEN 6 /* Octets in one ethernet addr */ 49 50 #define __verify_pcpu_ptr(ptr) \ 51 do { \ 52 const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \ 53 (void)__vpp_verify; \ 54 } while (0) 55 56 #define VERIFY_PERCPU_PTR(__p) \ 57 ({ \ 58 __verify_pcpu_ptr(__p); \ 59 (typeof(*(__p)) __kernel __force *)(__p); \ 60 }) 61 62 #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); }) 63 #define smp_processor_id() 0 64 #define num_present_cpus() 1 65 #define for_each_present_cpu(cpu) \ 66 for ((cpu) = 0; (cpu) < 1; (cpu)++) 67 68 #define NET_SKB_PAD max(32, MVPP2_CPU_D_CACHE_LINE_SIZE) 69 70 #define CONFIG_NR_CPUS 1 71 #define ETH_HLEN ETHER_HDR_SIZE /* Total octets in header */ 72 73 /* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */ 74 #define WRAP (2 + ETH_HLEN + 4 + 32) 75 #define MTU 1500 76 #define RX_BUFFER_SIZE (ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN)) 77 78 #define MVPP2_SMI_TIMEOUT 10000 79 80 /* RX Fifo Registers */ 81 #define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) 82 #define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port)) 83 #define MVPP2_RX_MIN_PKT_SIZE_REG 0x60 84 #define MVPP2_RX_FIFO_INIT_REG 0x64 85 86 /* RX DMA Top Registers */ 87 #define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port)) 88 #define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16) 89 #define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31) 90 #define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool)) 91 #define MVPP2_POOL_BUF_SIZE_OFFSET 5 92 #define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq)) 93 #define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff 94 #define MVPP2_SNOOP_BUF_HDR_MASK BIT(9) 95 #define MVPP2_RXQ_POOL_SHORT_OFFS 20 96 #define MVPP21_RXQ_POOL_SHORT_MASK 0x700000 97 #define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000 98 #define MVPP2_RXQ_POOL_LONG_OFFS 24 99 #define MVPP21_RXQ_POOL_LONG_MASK 0x7000000 100 #define MVPP22_RXQ_POOL_LONG_MASK 0xf000000 101 #define MVPP2_RXQ_PACKET_OFFSET_OFFS 28 102 #define 
MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 103 #define MVPP2_RXQ_DISABLE_MASK BIT(31) 104 105 /* Parser Registers */ 106 #define MVPP2_PRS_INIT_LOOKUP_REG 0x1000 107 #define MVPP2_PRS_PORT_LU_MAX 0xf 108 #define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4)) 109 #define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4)) 110 #define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4)) 111 #define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8)) 112 #define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8)) 113 #define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4)) 114 #define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8)) 115 #define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8)) 116 #define MVPP2_PRS_TCAM_IDX_REG 0x1100 117 #define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4) 118 #define MVPP2_PRS_TCAM_INV_MASK BIT(31) 119 #define MVPP2_PRS_SRAM_IDX_REG 0x1200 120 #define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4) 121 #define MVPP2_PRS_TCAM_CTRL_REG 0x1230 122 #define MVPP2_PRS_TCAM_EN_MASK BIT(0) 123 124 /* Classifier Registers */ 125 #define MVPP2_CLS_MODE_REG 0x1800 126 #define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0) 127 #define MVPP2_CLS_PORT_WAY_REG 0x1810 128 #define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port)) 129 #define MVPP2_CLS_LKP_INDEX_REG 0x1814 130 #define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6 131 #define MVPP2_CLS_LKP_TBL_REG 0x1818 132 #define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff 133 #define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25) 134 #define MVPP2_CLS_FLOW_INDEX_REG 0x1820 135 #define MVPP2_CLS_FLOW_TBL0_REG 0x1824 136 #define MVPP2_CLS_FLOW_TBL1_REG 0x1828 137 #define MVPP2_CLS_FLOW_TBL2_REG 0x182c 138 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4)) 139 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3 140 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7 141 #define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4)) 142 #define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0 143 #define 
MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port)) 144 145 /* Descriptor Manager Top Registers */ 146 #define MVPP2_RXQ_NUM_REG 0x2040 147 #define MVPP2_RXQ_DESC_ADDR_REG 0x2044 148 #define MVPP22_DESC_ADDR_OFFS 8 149 #define MVPP2_RXQ_DESC_SIZE_REG 0x2048 150 #define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0 151 #define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq)) 152 #define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0 153 #define MVPP2_RXQ_NUM_NEW_OFFSET 16 154 #define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq)) 155 #define MVPP2_RXQ_OCCUPIED_MASK 0x3fff 156 #define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16 157 #define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000 158 #define MVPP2_RXQ_THRESH_REG 0x204c 159 #define MVPP2_OCCUPIED_THRESH_OFFSET 0 160 #define MVPP2_OCCUPIED_THRESH_MASK 0x3fff 161 #define MVPP2_RXQ_INDEX_REG 0x2050 162 #define MVPP2_TXQ_NUM_REG 0x2080 163 #define MVPP2_TXQ_DESC_ADDR_REG 0x2084 164 #define MVPP2_TXQ_DESC_SIZE_REG 0x2088 165 #define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0 166 #define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090 167 #define MVPP2_TXQ_THRESH_REG 0x2094 168 #define MVPP2_TRANSMITTED_THRESH_OFFSET 16 169 #define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000 170 #define MVPP2_TXQ_INDEX_REG 0x2098 171 #define MVPP2_TXQ_PREF_BUF_REG 0x209c 172 #define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff) 173 #define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13)) 174 #define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14)) 175 #define MVPP2_PREF_BUF_THRESH(val) ((val) << 17) 176 #define MVPP2_TXQ_DRAIN_EN_MASK BIT(31) 177 #define MVPP2_TXQ_PENDING_REG 0x20a0 178 #define MVPP2_TXQ_PENDING_MASK 0x3fff 179 #define MVPP2_TXQ_INT_STATUS_REG 0x20a4 180 #define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq)) 181 #define MVPP2_TRANSMITTED_COUNT_OFFSET 16 182 #define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000 183 #define MVPP2_TXQ_RSVD_REQ_REG 0x20b0 184 #define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16 185 #define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4 186 #define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff 187 #define MVPP2_TXQ_RSVD_CLR_REG 
0x20b8 188 #define MVPP2_TXQ_RSVD_CLR_OFFSET 16 189 #define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu)) 190 #define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8 191 #define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu)) 192 #define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0 193 #define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu)) 194 #define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff 195 #define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu)) 196 197 /* MBUS bridge registers */ 198 #define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2)) 199 #define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2)) 200 #define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2)) 201 #define MVPP2_BASE_ADDR_ENABLE 0x4060 202 203 /* AXI Bridge Registers */ 204 #define MVPP22_AXI_BM_WR_ATTR_REG 0x4100 205 #define MVPP22_AXI_BM_RD_ATTR_REG 0x4104 206 #define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110 207 #define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114 208 #define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118 209 #define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c 210 #define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120 211 #define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130 212 #define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150 213 #define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154 214 #define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160 215 #define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164 216 217 /* Values for AXI Bridge registers */ 218 #define MVPP22_AXI_ATTR_CACHE_OFFS 0 219 #define MVPP22_AXI_ATTR_DOMAIN_OFFS 12 220 221 #define MVPP22_AXI_CODE_CACHE_OFFS 0 222 #define MVPP22_AXI_CODE_DOMAIN_OFFS 4 223 224 #define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3 225 #define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7 226 #define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb 227 228 #define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2 229 #define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3 230 231 /* Interrupt Cause and Mask registers */ 232 #define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) 233 #define MVPP21_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq)) 234 235 #define MVPP22_ISR_RXQ_GROUP_INDEX_REG 
0x5400 236 #define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf 237 #define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 238 #define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7 239 240 #define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf 241 #define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 242 243 #define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404 244 #define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f 245 #define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00 246 #define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8 247 248 #define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) 249 #define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) 250 #define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) 251 #define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) 252 #define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff 253 #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 254 #define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) 255 #define MVPP2_CAUSE_FCS_ERR_MASK BIT(25) 256 #define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26) 257 #define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29) 258 #define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30) 259 #define MVPP2_CAUSE_MISC_SUM_MASK BIT(31) 260 #define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port)) 261 #define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc 262 #define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff 263 #define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000 264 #define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31) 265 #define MVPP2_ISR_MISC_CAUSE_REG 0x55b0 266 267 /* Buffer Manager registers */ 268 #define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4)) 269 #define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80 270 #define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4)) 271 #define MVPP2_BM_POOL_SIZE_MASK 0xfff0 272 #define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4)) 273 #define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0 274 #define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4)) 275 #define 
MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0 276 #define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4)) 277 #define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4)) 278 #define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff 279 #define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16) 280 #define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4)) 281 #define MVPP2_BM_START_MASK BIT(0) 282 #define MVPP2_BM_STOP_MASK BIT(1) 283 #define MVPP2_BM_STATE_MASK BIT(4) 284 #define MVPP2_BM_LOW_THRESH_OFFS 8 285 #define MVPP2_BM_LOW_THRESH_MASK 0x7f00 286 #define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \ 287 MVPP2_BM_LOW_THRESH_OFFS) 288 #define MVPP2_BM_HIGH_THRESH_OFFS 16 289 #define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000 290 #define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \ 291 MVPP2_BM_HIGH_THRESH_OFFS) 292 #define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4)) 293 #define MVPP2_BM_RELEASED_DELAY_MASK BIT(0) 294 #define MVPP2_BM_ALLOC_FAILED_MASK BIT(1) 295 #define MVPP2_BM_BPPE_EMPTY_MASK BIT(2) 296 #define MVPP2_BM_BPPE_FULL_MASK BIT(3) 297 #define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4) 298 #define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4)) 299 #define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4)) 300 #define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0) 301 #define MVPP2_BM_VIRT_ALLOC_REG 0x6440 302 #define MVPP2_BM_ADDR_HIGH_ALLOC 0x6444 303 #define MVPP2_BM_ADDR_HIGH_PHYS_MASK 0xff 304 #define MVPP2_BM_ADDR_HIGH_VIRT_MASK 0xff00 305 #define MVPP2_BM_ADDR_HIGH_VIRT_SHIFT 8 306 #define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4)) 307 #define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0) 308 #define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1) 309 #define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2) 310 #define MVPP2_BM_VIRT_RLS_REG 0x64c0 311 #define MVPP21_BM_MC_RLS_REG 0x64c4 312 #define MVPP2_BM_MC_ID_MASK 0xfff 313 #define MVPP2_BM_FORCE_RELEASE_MASK BIT(12) 314 #define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4 315 #define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff 316 #define 
MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 317 #define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8 318 #define MVPP22_BM_MC_RLS_REG 0x64d4 319 320 /* TX Scheduler registers */ 321 #define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 322 #define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004 323 #define MVPP2_TXP_SCHED_ENQ_MASK 0xff 324 #define MVPP2_TXP_SCHED_DISQ_OFFSET 8 325 #define MVPP2_TXP_SCHED_CMD_1_REG 0x8010 326 #define MVPP2_TXP_SCHED_PERIOD_REG 0x8018 327 #define MVPP2_TXP_SCHED_MTU_REG 0x801c 328 #define MVPP2_TXP_MTU_MAX 0x7FFFF 329 #define MVPP2_TXP_SCHED_REFILL_REG 0x8020 330 #define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff 331 #define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000 332 #define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20) 333 #define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024 334 #define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff 335 #define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2)) 336 #define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff 337 #define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000 338 #define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20) 339 #define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2)) 340 #define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff 341 #define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2)) 342 #define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff 343 344 /* TX general registers */ 345 #define MVPP2_TX_SNOOP_REG 0x8800 346 #define MVPP2_TX_PORT_FLUSH_REG 0x8810 347 #define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port)) 348 349 /* LMS registers */ 350 #define MVPP2_SRC_ADDR_MIDDLE 0x24 351 #define MVPP2_SRC_ADDR_HIGH 0x28 352 #define MVPP2_PHY_AN_CFG0_REG 0x34 353 #define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7) 354 #define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c 355 #define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 356 357 /* Per-port registers */ 358 #define MVPP2_GMAC_CTRL_0_REG 0x0 359 #define MVPP2_GMAC_PORT_EN_MASK BIT(0) 360 #define MVPP2_GMAC_PORT_TYPE_MASK BIT(1) 361 #define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 362 #define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc 
363 #define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) 364 #define MVPP2_GMAC_CTRL_1_REG 0x4 365 #define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1) 366 #define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5) 367 #define MVPP2_GMAC_PCS_LB_EN_BIT 6 368 #define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6) 369 #define MVPP2_GMAC_SA_LOW_OFFS 7 370 #define MVPP2_GMAC_CTRL_2_REG 0x8 371 #define MVPP2_GMAC_INBAND_AN_MASK BIT(0) 372 #define MVPP2_GMAC_SGMII_MODE_MASK BIT(0) 373 #define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) 374 #define MVPP2_GMAC_PORT_RGMII_MASK BIT(4) 375 #define MVPP2_GMAC_PORT_DIS_PADING_MASK BIT(5) 376 #define MVPP2_GMAC_PORT_RESET_MASK BIT(6) 377 #define MVPP2_GMAC_CLK_125_BYPS_EN_MASK BIT(9) 378 #define MVPP2_GMAC_AUTONEG_CONFIG 0xc 379 #define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) 380 #define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) 381 #define MVPP2_GMAC_EN_PCS_AN BIT(2) 382 #define MVPP2_GMAC_AN_BYPASS_EN BIT(3) 383 #define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) 384 #define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) 385 #define MVPP2_GMAC_AN_SPEED_EN BIT(7) 386 #define MVPP2_GMAC_FC_ADV_EN BIT(9) 387 #define MVPP2_GMAC_EN_FC_AN BIT(11) 388 #define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) 389 #define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) 390 #define MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG BIT(15) 391 #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c 392 #define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 393 #define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 394 #define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ 395 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) 396 #define MVPP2_GMAC_CTRL_4_REG 0x90 397 #define MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK BIT(0) 398 #define MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK BIT(5) 399 #define MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK BIT(6) 400 #define MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK BIT(7) 401 402 /* 403 * Per-port XGMAC registers. PPv2.2 only, only for GOP port 0, 404 * relative to port->base. 
405 */ 406 407 /* Port Mac Control0 */ 408 #define MVPP22_XLG_CTRL0_REG 0x100 409 #define MVPP22_XLG_PORT_EN BIT(0) 410 #define MVPP22_XLG_MAC_RESETN BIT(1) 411 #define MVPP22_XLG_RX_FC_EN BIT(7) 412 #define MVPP22_XLG_MIBCNT_DIS BIT(13) 413 /* Port Mac Control1 */ 414 #define MVPP22_XLG_CTRL1_REG 0x104 415 #define MVPP22_XLG_MAX_RX_SIZE_OFFS 0 416 #define MVPP22_XLG_MAX_RX_SIZE_MASK 0x1fff 417 /* Port Interrupt Mask */ 418 #define MVPP22_XLG_INTERRUPT_MASK_REG 0x118 419 #define MVPP22_XLG_INTERRUPT_LINK_CHANGE BIT(1) 420 /* Port Mac Control3 */ 421 #define MVPP22_XLG_CTRL3_REG 0x11c 422 #define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13) 423 #define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) 424 #define MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC (1 << 13) 425 /* Port Mac Control4 */ 426 #define MVPP22_XLG_CTRL4_REG 0x184 427 #define MVPP22_XLG_FORWARD_802_3X_FC_EN BIT(5) 428 #define MVPP22_XLG_FORWARD_PFC_EN BIT(6) 429 #define MVPP22_XLG_MODE_DMA_1G BIT(12) 430 #define MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK BIT(14) 431 432 /* XPCS registers */ 433 434 /* Global Configuration 0 */ 435 #define MVPP22_XPCS_GLOBAL_CFG_0_REG 0x0 436 #define MVPP22_XPCS_PCSRESET BIT(0) 437 #define MVPP22_XPCS_PCSMODE_OFFS 3 438 #define MVPP22_XPCS_PCSMODE_MASK (0x3 << \ 439 MVPP22_XPCS_PCSMODE_OFFS) 440 #define MVPP22_XPCS_LANEACTIVE_OFFS 5 441 #define MVPP22_XPCS_LANEACTIVE_MASK (0x3 << \ 442 MVPP22_XPCS_LANEACTIVE_OFFS) 443 444 /* MPCS registers */ 445 446 #define PCS40G_COMMON_CONTROL 0x14 447 #define FORWARD_ERROR_CORRECTION_MASK BIT(10) 448 449 #define PCS_CLOCK_RESET 0x14c 450 #define TX_SD_CLK_RESET_MASK BIT(0) 451 #define RX_SD_CLK_RESET_MASK BIT(1) 452 #define MAC_CLK_RESET_MASK BIT(2) 453 #define CLK_DIVISION_RATIO_OFFS 4 454 #define CLK_DIVISION_RATIO_MASK (0x7 << CLK_DIVISION_RATIO_OFFS) 455 #define CLK_DIV_PHASE_SET_MASK BIT(11) 456 457 /* System Soft Reset 1 */ 458 #define GOP_SOFT_RESET_1_REG 0x108 459 #define NETC_GOP_SOFT_RESET_OFFS 6 460 #define NETC_GOP_SOFT_RESET_MASK 
(0x1 << \ 461 NETC_GOP_SOFT_RESET_OFFS) 462 463 /* Ports Control 0 */ 464 #define NETCOMP_PORTS_CONTROL_0_REG 0x110 465 #define NETC_BUS_WIDTH_SELECT_OFFS 1 466 #define NETC_BUS_WIDTH_SELECT_MASK (0x1 << \ 467 NETC_BUS_WIDTH_SELECT_OFFS) 468 #define NETC_GIG_RX_DATA_SAMPLE_OFFS 29 469 #define NETC_GIG_RX_DATA_SAMPLE_MASK (0x1 << \ 470 NETC_GIG_RX_DATA_SAMPLE_OFFS) 471 #define NETC_CLK_DIV_PHASE_OFFS 31 472 #define NETC_CLK_DIV_PHASE_MASK (0x1 << NETC_CLK_DIV_PHASE_OFFS) 473 /* Ports Control 1 */ 474 #define NETCOMP_PORTS_CONTROL_1_REG 0x114 475 #define NETC_PORTS_ACTIVE_OFFSET(p) (0 + p) 476 #define NETC_PORTS_ACTIVE_MASK(p) (0x1 << \ 477 NETC_PORTS_ACTIVE_OFFSET(p)) 478 #define NETC_PORT_GIG_RF_RESET_OFFS(p) (28 + p) 479 #define NETC_PORT_GIG_RF_RESET_MASK(p) (0x1 << \ 480 NETC_PORT_GIG_RF_RESET_OFFS(p)) 481 #define NETCOMP_CONTROL_0_REG 0x120 482 #define NETC_GBE_PORT0_SGMII_MODE_OFFS 0 483 #define NETC_GBE_PORT0_SGMII_MODE_MASK (0x1 << \ 484 NETC_GBE_PORT0_SGMII_MODE_OFFS) 485 #define NETC_GBE_PORT1_SGMII_MODE_OFFS 1 486 #define NETC_GBE_PORT1_SGMII_MODE_MASK (0x1 << \ 487 NETC_GBE_PORT1_SGMII_MODE_OFFS) 488 #define NETC_GBE_PORT1_MII_MODE_OFFS 2 489 #define NETC_GBE_PORT1_MII_MODE_MASK (0x1 << \ 490 NETC_GBE_PORT1_MII_MODE_OFFS) 491 492 #define MVPP22_SMI_MISC_CFG_REG (MVPP22_SMI + 0x04) 493 #define MVPP22_SMI_POLLING_EN BIT(10) 494 495 #define MVPP22_SMI_PHY_ADDR_REG(port) (MVPP22_SMI + 0x04 + \ 496 (0x4 * (port))) 497 498 #define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff 499 500 /* Descriptor ring Macros */ 501 #define MVPP2_QUEUE_NEXT_DESC(q, index) \ 502 (((index) < (q)->last_desc) ? 
((index) + 1) : 0) 503 504 /* SMI: 0xc0054 -> offset 0x54 to lms_base */ 505 #define MVPP21_SMI 0x0054 506 /* PP2.2: SMI: 0x12a200 -> offset 0x1200 to iface_base */ 507 #define MVPP22_SMI 0x1200 508 #define MVPP2_PHY_REG_MASK 0x1f 509 /* SMI register fields */ 510 #define MVPP2_SMI_DATA_OFFS 0 /* Data */ 511 #define MVPP2_SMI_DATA_MASK (0xffff << MVPP2_SMI_DATA_OFFS) 512 #define MVPP2_SMI_DEV_ADDR_OFFS 16 /* PHY device address */ 513 #define MVPP2_SMI_REG_ADDR_OFFS 21 /* PHY device reg addr*/ 514 #define MVPP2_SMI_OPCODE_OFFS 26 /* Write/Read opcode */ 515 #define MVPP2_SMI_OPCODE_READ (1 << MVPP2_SMI_OPCODE_OFFS) 516 #define MVPP2_SMI_READ_VALID (1 << 27) /* Read Valid */ 517 #define MVPP2_SMI_BUSY (1 << 28) /* Busy */ 518 519 #define MVPP2_PHY_ADDR_MASK 0x1f 520 #define MVPP2_PHY_REG_MASK 0x1f 521 522 /* Additional PPv2.2 offsets */ 523 #define MVPP22_MPCS 0x007000 524 #define MVPP22_XPCS 0x007400 525 #define MVPP22_PORT_BASE 0x007e00 526 #define MVPP22_PORT_OFFSET 0x001000 527 #define MVPP22_RFU1 0x318000 528 529 /* Maximum number of ports */ 530 #define MVPP22_GOP_MAC_NUM 4 531 532 /* Sets the field located at the specified in data */ 533 #define MVPP2_RGMII_TX_FIFO_MIN_TH 0x41 534 #define MVPP2_SGMII_TX_FIFO_MIN_TH 0x5 535 #define MVPP2_SGMII2_5_TX_FIFO_MIN_TH 0xb 536 537 /* Net Complex */ 538 enum mv_netc_topology { 539 MV_NETC_GE_MAC2_SGMII = BIT(0), 540 MV_NETC_GE_MAC3_SGMII = BIT(1), 541 MV_NETC_GE_MAC3_RGMII = BIT(2), 542 }; 543 544 enum mv_netc_phase { 545 MV_NETC_FIRST_PHASE, 546 MV_NETC_SECOND_PHASE, 547 }; 548 549 enum mv_netc_sgmii_xmi_mode { 550 MV_NETC_GBE_SGMII, 551 MV_NETC_GBE_XMII, 552 }; 553 554 enum mv_netc_mii_mode { 555 MV_NETC_GBE_RGMII, 556 MV_NETC_GBE_MII, 557 }; 558 559 enum mv_netc_lanes { 560 MV_NETC_LANE_23, 561 MV_NETC_LANE_45, 562 }; 563 564 /* Various constants */ 565 566 /* Coalescing */ 567 #define MVPP2_TXDONE_COAL_PKTS_THRESH 15 568 #define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL 569 #define MVPP2_RX_COAL_PKTS 32 570 #define 
MVPP2_RX_COAL_USEC 100 571 572 /* The two bytes Marvell header. Either contains a special value used 573 * by Marvell switches when a specific hardware mode is enabled (not 574 * supported by this driver) or is filled automatically by zeroes on 575 * the RX side. Those two bytes being at the front of the Ethernet 576 * header, they allow to have the IP header aligned on a 4 bytes 577 * boundary automatically: the hardware skips those two bytes on its 578 * own. 579 */ 580 #define MVPP2_MH_SIZE 2 581 #define MVPP2_ETH_TYPE_LEN 2 582 #define MVPP2_PPPOE_HDR_SIZE 8 583 #define MVPP2_VLAN_TAG_LEN 4 584 585 /* Lbtd 802.3 type */ 586 #define MVPP2_IP_LBDT_TYPE 0xfffa 587 588 #define MVPP2_CPU_D_CACHE_LINE_SIZE 32 589 #define MVPP2_TX_CSUM_MAX_SIZE 9800 590 591 /* Timeout constants */ 592 #define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000 593 #define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000 594 595 #define MVPP2_TX_MTU_MAX 0x7ffff 596 597 /* Maximum number of T-CONTs of PON port */ 598 #define MVPP2_MAX_TCONT 16 599 600 /* Maximum number of supported ports */ 601 #define MVPP2_MAX_PORTS 4 602 603 /* Maximum number of TXQs used by single port */ 604 #define MVPP2_MAX_TXQ 8 605 606 /* Default number of TXQs in use */ 607 #define MVPP2_DEFAULT_TXQ 1 608 609 /* Dfault number of RXQs in use */ 610 #define MVPP2_DEFAULT_RXQ 1 611 #define CONFIG_MV_ETH_RXQ 8 /* increment by 8 */ 612 613 /* Max number of Rx descriptors */ 614 #define MVPP2_MAX_RXD 16 615 616 /* Max number of Tx descriptors */ 617 #define MVPP2_MAX_TXD 16 618 619 /* Amount of Tx descriptors that can be reserved at once by CPU */ 620 #define MVPP2_CPU_DESC_CHUNK 64 621 622 /* Max number of Tx descriptors in each aggregated queue */ 623 #define MVPP2_AGGR_TXQ_SIZE 256 624 625 /* Descriptor aligned size */ 626 #define MVPP2_DESC_ALIGNED_SIZE 32 627 628 /* Descriptor alignment mask */ 629 #define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1) 630 631 /* RX FIFO constants */ 632 #define MVPP21_RX_FIFO_PORT_DATA_SIZE 0x2000 633 
#define MVPP21_RX_FIFO_PORT_ATTR_SIZE 0x80 634 #define MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE 0x8000 635 #define MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE 0x2000 636 #define MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE 0x1000 637 #define MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE 0x200 638 #define MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE 0x80 639 #define MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE 0x40 640 #define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 641 642 /* TX general registers */ 643 #define MVPP22_TX_FIFO_SIZE_REG(eth_tx_port) (0x8860 + ((eth_tx_port) << 2)) 644 #define MVPP22_TX_FIFO_SIZE_MASK 0xf 645 646 /* TX FIFO constants */ 647 #define MVPP2_TX_FIFO_DATA_SIZE_10KB 0xa 648 #define MVPP2_TX_FIFO_DATA_SIZE_3KB 0x3 649 650 /* RX buffer constants */ 651 #define MVPP2_SKB_SHINFO_SIZE \ 652 0 653 654 #define MVPP2_RX_PKT_SIZE(mtu) \ 655 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ 656 ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE) 657 658 #define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) 659 #define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) 660 #define MVPP2_RX_MAX_PKT_SIZE(total_size) \ 661 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) 662 663 #define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) 664 665 /* IPv6 max L3 address size */ 666 #define MVPP2_MAX_L3_ADDR_SIZE 16 667 668 /* Port flags */ 669 #define MVPP2_F_LOOPBACK BIT(0) 670 671 /* Marvell tag types */ 672 enum mvpp2_tag_type { 673 MVPP2_TAG_TYPE_NONE = 0, 674 MVPP2_TAG_TYPE_MH = 1, 675 MVPP2_TAG_TYPE_DSA = 2, 676 MVPP2_TAG_TYPE_EDSA = 3, 677 MVPP2_TAG_TYPE_VLAN = 4, 678 MVPP2_TAG_TYPE_LAST = 5 679 }; 680 681 /* Parser constants */ 682 #define MVPP2_PRS_TCAM_SRAM_SIZE 256 683 #define MVPP2_PRS_TCAM_WORDS 6 684 #define MVPP2_PRS_SRAM_WORDS 4 685 #define MVPP2_PRS_FLOW_ID_SIZE 64 686 #define MVPP2_PRS_FLOW_ID_MASK 0x3f 687 #define MVPP2_PRS_TCAM_ENTRY_INVALID 1 688 #define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5) 689 #define MVPP2_PRS_IPV4_HEAD 0x40 690 #define MVPP2_PRS_IPV4_HEAD_MASK 0xf0 691 #define 
MVPP2_PRS_IPV4_MC 0xe0 692 #define MVPP2_PRS_IPV4_MC_MASK 0xf0 693 #define MVPP2_PRS_IPV4_BC_MASK 0xff 694 #define MVPP2_PRS_IPV4_IHL 0x5 695 #define MVPP2_PRS_IPV4_IHL_MASK 0xf 696 #define MVPP2_PRS_IPV6_MC 0xff 697 #define MVPP2_PRS_IPV6_MC_MASK 0xff 698 #define MVPP2_PRS_IPV6_HOP_MASK 0xff 699 #define MVPP2_PRS_TCAM_PROTO_MASK 0xff 700 #define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f 701 #define MVPP2_PRS_DBL_VLANS_MAX 100 702 703 /* Tcam structure: 704 * - lookup ID - 4 bits 705 * - port ID - 1 byte 706 * - additional information - 1 byte 707 * - header data - 8 bytes 708 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). 709 */ 710 #define MVPP2_PRS_AI_BITS 8 711 #define MVPP2_PRS_PORT_MASK 0xff 712 #define MVPP2_PRS_LU_MASK 0xf 713 #define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ 714 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) 715 #define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ 716 (((offs) * 2) - ((offs) % 2) + 2) 717 #define MVPP2_PRS_TCAM_AI_BYTE 16 718 #define MVPP2_PRS_TCAM_PORT_BYTE 17 719 #define MVPP2_PRS_TCAM_LU_BYTE 20 720 #define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) 721 #define MVPP2_PRS_TCAM_INV_WORD 5 722 /* Tcam entries ID */ 723 #define MVPP2_PE_DROP_ALL 0 724 #define MVPP2_PE_FIRST_FREE_TID 1 725 #define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31) 726 #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) 727 #define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29) 728 #define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) 729 #define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27) 730 #define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26) 731 #define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19) 732 #define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) 733 #define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) 734 #define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) 735 #define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) 736 #define 
MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) 737 #define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13) 738 #define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12) 739 #define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11) 740 #define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10) 741 #define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9) 742 #define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8) 743 #define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7) 744 #define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6) 745 #define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5) 746 #define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4) 747 #define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3) 748 #define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) 749 #define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1) 750 751 /* Sram structure 752 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0). 
 */
/* SRAM result-info field: 32 "ri" bits plus 32 per-bit "ri ctrl" mask bits */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
/* User-defined offset field: 8 value bits plus sign bit */
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
/* Shift operation select (add / IP4 add / IP6 add) */
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
/* UDF operation select */
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
/* Additional info field: 8 "ai" bits plus 8 per-bit "ai ctrl" mask bits */
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)

/* DSA/EDSA type - boolean selectors passed to the DSA helpers */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64

/* BM (buffer manager) constants - U-Boot uses a single small pool */
#define MVPP2_BM_POOLS_NUM		1
#define MVPP2_BM_LONG_BUF_NUM		16
#define MVPP2_BM_SHORT_BUF_NUM		16
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	0

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

/* BM short pool packet size
 * These value assure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;
	void __iomem *iface_base;
	void __iomem *mdio_base;

	/* PPv2.2-only register regions */
	void __iomem *mpcs_base;
	void __iomem *xpcs_base;
	void __iomem *rfu1_base;

	/* Net complex configuration, written once at probe */
	u32 netc_config;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct
mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	/* MDIO bus shared by all ports of this controller */
	struct mii_dev *bus;

	/* Non-zero once one-time controller init has completed */
	int probe_done;
};

struct mvpp2_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int irq;

	/* Back-pointer to the shared controller state */
	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;

	int pkt_size;

	u32 pending_cause_rx;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	int phy_node;
	int phyaddr;
#ifdef CONFIG_DM_GPIO
	struct gpio_desc phy_reset_gpio;
	struct gpio_desc phy_tx_disable_gpio;
#endif
	int init;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	unsigned int phy_speed;		/* SGMII 1Gbps vs 2.5Gbps */

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	u8 dev_addr[ETH_ALEN];
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8 packet_offset;	/* the offset from the buffer beginning	*/
	u8 phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8 reserved4;		/* bm_qset (for future use, BM)		*/
	u8 reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8 packet_offset;
	u8 phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of thex Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

/* TCAM entry viewed either as words or bytes */
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

/* SRAM entry viewed either as words or bytes */
union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

/* SW mirror of one parser TCAM entry, used to track HW state */
struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	unsigned long *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

/* Static declaractions */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_DEFAULT_TXQ;

static int base_id;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/*
 * U-Boot internal data, mostly uncached buffers for descriptors and data
 */
struct buffer_location {
	struct mvpp2_tx_desc *aggr_tx_descs;
	struct mvpp2_tx_desc *tx_descs;
	struct mvpp2_rx_desc *rx_descs;
	unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
	unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
	int first_rxq;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). driver uses less bd's so use 1MB bdspace.
 */
#define BD_SPACE	(1 << 20)

/* Utility/helper methods */

/* Write a shared (non per-port) controller register */
static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

/* Read a shared (non per-port) controller register */
static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}

/* Set the buffer DMA address in a TX descriptor, PPv2.1/2.2 layout aware.
 * On PPv2.2 the address occupies bits [40:0] of buf_dma_addr_ptp.
 */
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}

/* Set the payload size field of a TX descriptor */
static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

/* Set the destination physical TXQ of a TX descriptor */
static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

/* Set the command word of a TX descriptor (MVPP2_TXD_* flags) */
static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

/* Set the packet offset (from buffer start) of a TX descriptor */
static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}

/* Get the buffer DMA address from a RX descriptor; on PPv2.2 the
 * address is bits [40:0] of buf_dma_addr_key_hash.
 */
static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

/* Get the buffer cookie from a RX descriptor */
static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

/* Get the received packet size from a RX descriptor */
static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

/* Get the status word (MVPP2_RXD_* flags) from a RX descriptor */
static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

/* Advance the per-CPU TX cleanup index, wrapping at ring size */
static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

/* Parser configuration routines */

/* Update parser tcam and sram hw entries.
 * The TCAM/SRAM tables are accessed indirectly: the entry index is
 * written to the IDX register, then each data word to the DATA regs.
 * Returns 0, or -EINVAL if pe->index is out of range.
 */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw.
 * Returns 0, MVPP2_PRS_TCAM_ENTRY_INVALID when the entry's invalidation
 * bit is set, or -EINVAL if pe->index is out of range.
 */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}

/* Update mask for single port in tcam sw entry.
 * The port-enable byte is inverted: a cleared bit means "match this port".
 */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry.
 * "ports" is a positive-logic bitmap; the stored enable byte is its
 * inverse (see mvpp2_prs_tcam_port_set above).
 */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}

/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void
mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
			     unsigned int offs, unsigned char *byte,
			     unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Set ethertype in tcam sw entry (big-endian: high byte first) */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set bits in sram sw entry.
 * Note: only the single byte containing bit_num is touched, so
 * (bit_num % 8) plus the width of val must stay within one byte.
 */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry (same single-byte constraint as above) */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry.
 * For every bit selected in "mask", the ri bit is set/cleared from "bits"
 * and the matching ri-ctrl bit is set so HW applies the update.
 */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Update ai bits in sram sw entry (same per-bit scheme as ri above) */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry.
 * The 8-bit ai field is not byte aligned, so it is reassembled from
 * two adjacent sram bytes.
 */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}

/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup interation
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier.
 * The 8-bit UDF value and 2-bit op straddle byte boundaries, hence the
 * explicit clear/set of the spill-over bits in the following byte.
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int
flow)
{
	/* Scans the shadow table (highest index first) for a valid FLOWS
	 * entry whose sram flow ID matches "flow".
	 * Returns a kzalloc'ed entry the CALLER must kfree, or NULL when
	 * not found or on allocation failure.
	 */
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through the all entires with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		/* NOTE(review): mvpp2_prs_hw_read() return value ignored
		 * here - shadow "valid" is assumed to guarantee success
		 */
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram store classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Return first free tcam index, seeking from start to end.
 * Returns -EINVAL when no free entry exists in the range.
 */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}

/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exist - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exist - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exist - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}

/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID*/
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}

/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}

/* Set default entires (place holder) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index
= MVPP2_PE_MAC_NON_PROMISCUOUS; 1976 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); 1977 1978 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, 1979 MVPP2_PRS_RI_DROP_MASK); 1980 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 1981 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 1982 1983 /* Unmask all ports */ 1984 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 1985 1986 /* Update shadow table and hw entry */ 1987 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 1988 mvpp2_prs_hw_write(priv, &pe); 1989 1990 /* place holders only - no ports */ 1991 mvpp2_prs_mac_drop_all_set(priv, 0, false); 1992 mvpp2_prs_mac_promisc_set(priv, 0, false); 1993 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false); 1994 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false); 1995 } 1996 1997 /* Match basic ethertypes */ 1998 static int mvpp2_prs_etype_init(struct mvpp2 *priv) 1999 { 2000 struct mvpp2_prs_entry pe; 2001 int tid; 2002 2003 /* Ethertype: PPPoE */ 2004 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2005 MVPP2_PE_LAST_FREE_TID); 2006 if (tid < 0) 2007 return tid; 2008 2009 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 2010 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2011 pe.index = tid; 2012 2013 mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES); 2014 2015 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE, 2016 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2017 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE); 2018 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK, 2019 MVPP2_PRS_RI_PPPOE_MASK); 2020 2021 /* Update shadow table and hw entry */ 2022 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2023 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2024 priv->prs_shadow[pe.index].finish = false; 2025 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK, 2026 MVPP2_PRS_RI_PPPOE_MASK); 2027 mvpp2_prs_hw_write(priv, &pe); 2028 2029 /* Ethertype: ARP */ 2030 tid = mvpp2_prs_tcam_first_free(priv, 
MVPP2_PE_FIRST_FREE_TID, 2031 MVPP2_PE_LAST_FREE_TID); 2032 if (tid < 0) 2033 return tid; 2034 2035 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 2036 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2037 pe.index = tid; 2038 2039 mvpp2_prs_match_etype(&pe, 0, PROT_ARP); 2040 2041 /* Generate flow in the next iteration*/ 2042 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 2043 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2044 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP, 2045 MVPP2_PRS_RI_L3_PROTO_MASK); 2046 /* Set L3 offset */ 2047 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2048 MVPP2_ETH_TYPE_LEN, 2049 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2050 2051 /* Update shadow table and hw entry */ 2052 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2053 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2054 priv->prs_shadow[pe.index].finish = true; 2055 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP, 2056 MVPP2_PRS_RI_L3_PROTO_MASK); 2057 mvpp2_prs_hw_write(priv, &pe); 2058 2059 /* Ethertype: LBTD */ 2060 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2061 MVPP2_PE_LAST_FREE_TID); 2062 if (tid < 0) 2063 return tid; 2064 2065 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 2066 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2067 pe.index = tid; 2068 2069 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE); 2070 2071 /* Generate flow in the next iteration*/ 2072 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 2073 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2074 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | 2075 MVPP2_PRS_RI_UDF3_RX_SPECIAL, 2076 MVPP2_PRS_RI_CPU_CODE_MASK | 2077 MVPP2_PRS_RI_UDF3_MASK); 2078 /* Set L3 offset */ 2079 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2080 MVPP2_ETH_TYPE_LEN, 2081 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2082 2083 /* Update shadow table and hw entry */ 2084 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2085 
priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2086 priv->prs_shadow[pe.index].finish = true; 2087 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | 2088 MVPP2_PRS_RI_UDF3_RX_SPECIAL, 2089 MVPP2_PRS_RI_CPU_CODE_MASK | 2090 MVPP2_PRS_RI_UDF3_MASK); 2091 mvpp2_prs_hw_write(priv, &pe); 2092 2093 /* Ethertype: IPv4 without options */ 2094 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2095 MVPP2_PE_LAST_FREE_TID); 2096 if (tid < 0) 2097 return tid; 2098 2099 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 2100 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2101 pe.index = tid; 2102 2103 mvpp2_prs_match_etype(&pe, 0, PROT_IP); 2104 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, 2105 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, 2106 MVPP2_PRS_IPV4_HEAD_MASK | 2107 MVPP2_PRS_IPV4_IHL_MASK); 2108 2109 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); 2110 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, 2111 MVPP2_PRS_RI_L3_PROTO_MASK); 2112 /* Skip eth_type + 4 bytes of IP header */ 2113 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, 2114 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2115 /* Set L3 offset */ 2116 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2117 MVPP2_ETH_TYPE_LEN, 2118 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2119 2120 /* Update shadow table and hw entry */ 2121 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2122 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2123 priv->prs_shadow[pe.index].finish = false; 2124 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, 2125 MVPP2_PRS_RI_L3_PROTO_MASK); 2126 mvpp2_prs_hw_write(priv, &pe); 2127 2128 /* Ethertype: IPv4 with options */ 2129 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2130 MVPP2_PE_LAST_FREE_TID); 2131 if (tid < 0) 2132 return tid; 2133 2134 pe.index = tid; 2135 2136 /* Clear tcam data before updating */ 2137 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; 2138 
pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; 2139 2140 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, 2141 MVPP2_PRS_IPV4_HEAD, 2142 MVPP2_PRS_IPV4_HEAD_MASK); 2143 2144 /* Clear ri before updating */ 2145 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; 2146 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; 2147 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, 2148 MVPP2_PRS_RI_L3_PROTO_MASK); 2149 2150 /* Update shadow table and hw entry */ 2151 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2152 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2153 priv->prs_shadow[pe.index].finish = false; 2154 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT, 2155 MVPP2_PRS_RI_L3_PROTO_MASK); 2156 mvpp2_prs_hw_write(priv, &pe); 2157 2158 /* Ethertype: IPv6 without options */ 2159 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2160 MVPP2_PE_LAST_FREE_TID); 2161 if (tid < 0) 2162 return tid; 2163 2164 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 2165 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2166 pe.index = tid; 2167 2168 mvpp2_prs_match_etype(&pe, 0, PROT_IPV6); 2169 2170 /* Skip DIP of IPV6 header */ 2171 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + 2172 MVPP2_MAX_L3_ADDR_SIZE, 2173 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2174 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); 2175 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, 2176 MVPP2_PRS_RI_L3_PROTO_MASK); 2177 /* Set L3 offset */ 2178 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2179 MVPP2_ETH_TYPE_LEN, 2180 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2181 2182 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2183 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2184 priv->prs_shadow[pe.index].finish = false; 2185 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6, 2186 MVPP2_PRS_RI_L3_PROTO_MASK); 2187 mvpp2_prs_hw_write(priv, &pe); 2188 2189 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */ 2190 
memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 2191 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 2192 pe.index = MVPP2_PE_ETH_TYPE_UN; 2193 2194 /* Unmask all ports */ 2195 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 2196 2197 /* Generate flow in the next iteration*/ 2198 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2199 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 2200 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, 2201 MVPP2_PRS_RI_L3_PROTO_MASK); 2202 /* Set L3 offset even it's unknown L3 */ 2203 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 2204 MVPP2_ETH_TYPE_LEN, 2205 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 2206 2207 /* Update shadow table and hw entry */ 2208 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 2209 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 2210 priv->prs_shadow[pe.index].finish = true; 2211 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN, 2212 MVPP2_PRS_RI_L3_PROTO_MASK); 2213 mvpp2_prs_hw_write(priv, &pe); 2214 2215 return 0; 2216 } 2217 2218 /* Parser default initialization */ 2219 static int mvpp2_prs_default_init(struct udevice *dev, 2220 struct mvpp2 *priv) 2221 { 2222 int err, index, i; 2223 2224 /* Enable tcam table */ 2225 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); 2226 2227 /* Clear all tcam and sram entries */ 2228 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) { 2229 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); 2230 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) 2231 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0); 2232 2233 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index); 2234 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) 2235 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0); 2236 } 2237 2238 /* Invalidate all tcam entries */ 2239 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) 2240 mvpp2_prs_hw_inv(priv, index); 2241 2242 priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE, 2243 sizeof(struct 
mvpp2_prs_shadow), 2244 GFP_KERNEL); 2245 if (!priv->prs_shadow) 2246 return -ENOMEM; 2247 2248 /* Always start from lookup = 0 */ 2249 for (index = 0; index < MVPP2_MAX_PORTS; index++) 2250 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, 2251 MVPP2_PRS_PORT_LU_MAX, 0); 2252 2253 mvpp2_prs_def_flow_init(priv); 2254 2255 mvpp2_prs_mh_init(priv); 2256 2257 mvpp2_prs_mac_init(priv); 2258 2259 err = mvpp2_prs_etype_init(priv); 2260 if (err) 2261 return err; 2262 2263 return 0; 2264 } 2265 2266 /* Compare MAC DA with tcam entry data */ 2267 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, 2268 const u8 *da, unsigned char *mask) 2269 { 2270 unsigned char tcam_byte, tcam_mask; 2271 int index; 2272 2273 for (index = 0; index < ETH_ALEN; index++) { 2274 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask); 2275 if (tcam_mask != mask[index]) 2276 return false; 2277 2278 if ((tcam_mask & tcam_byte) != (da[index] & mask[index])) 2279 return false; 2280 } 2281 2282 return true; 2283 } 2284 2285 /* Find tcam entry with matched pair <MAC DA, port> */ 2286 static struct mvpp2_prs_entry * 2287 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, 2288 unsigned char *mask, int udf_type) 2289 { 2290 struct mvpp2_prs_entry *pe; 2291 int tid; 2292 2293 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2294 if (!pe) 2295 return NULL; 2296 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 2297 2298 /* Go through the all entires with MVPP2_PRS_LU_MAC */ 2299 for (tid = MVPP2_PE_FIRST_FREE_TID; 2300 tid <= MVPP2_PE_LAST_FREE_TID; tid++) { 2301 unsigned int entry_pmap; 2302 2303 if (!priv->prs_shadow[tid].valid || 2304 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || 2305 (priv->prs_shadow[tid].udf != udf_type)) 2306 continue; 2307 2308 pe->index = tid; 2309 mvpp2_prs_hw_read(priv, pe); 2310 entry_pmap = mvpp2_prs_tcam_port_map_get(pe); 2311 2312 if (mvpp2_prs_mac_range_equals(pe, da, mask) && 2313 entry_pmap == pmap) 2314 return pe; 2315 } 2316 
kfree(pe); 2317 2318 return NULL; 2319 } 2320 2321 /* Update parser's mac da entry */ 2322 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port, 2323 const u8 *da, bool add) 2324 { 2325 struct mvpp2_prs_entry *pe; 2326 unsigned int pmap, len, ri; 2327 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 2328 int tid; 2329 2330 /* Scan TCAM and see if entry with this <MAC DA, port> already exist */ 2331 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask, 2332 MVPP2_PRS_UDF_MAC_DEF); 2333 2334 /* No such entry */ 2335 if (!pe) { 2336 if (!add) 2337 return 0; 2338 2339 /* Create new TCAM entry */ 2340 /* Find first range mac entry*/ 2341 for (tid = MVPP2_PE_FIRST_FREE_TID; 2342 tid <= MVPP2_PE_LAST_FREE_TID; tid++) 2343 if (priv->prs_shadow[tid].valid && 2344 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) && 2345 (priv->prs_shadow[tid].udf == 2346 MVPP2_PRS_UDF_MAC_RANGE)) 2347 break; 2348 2349 /* Go through the all entries from first to last */ 2350 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2351 tid - 1); 2352 if (tid < 0) 2353 return tid; 2354 2355 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2356 if (!pe) 2357 return -1; 2358 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 2359 pe->index = tid; 2360 2361 /* Mask all ports */ 2362 mvpp2_prs_tcam_port_map_set(pe, 0); 2363 } 2364 2365 /* Update port mask */ 2366 mvpp2_prs_tcam_port_set(pe, port, add); 2367 2368 /* Invalidate the entry if no ports are left enabled */ 2369 pmap = mvpp2_prs_tcam_port_map_get(pe); 2370 if (pmap == 0) { 2371 if (add) { 2372 kfree(pe); 2373 return -1; 2374 } 2375 mvpp2_prs_hw_inv(priv, pe->index); 2376 priv->prs_shadow[pe->index].valid = false; 2377 kfree(pe); 2378 return 0; 2379 } 2380 2381 /* Continue - set next lookup */ 2382 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA); 2383 2384 /* Set match on DA */ 2385 len = ETH_ALEN; 2386 while (len--) 2387 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff); 2388 2389 /* Set result info bits 
*/ 2390 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK; 2391 2392 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | 2393 MVPP2_PRS_RI_MAC_ME_MASK); 2394 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK | 2395 MVPP2_PRS_RI_MAC_ME_MASK); 2396 2397 /* Shift to ethertype */ 2398 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN, 2399 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2400 2401 /* Update shadow table and hw entry */ 2402 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF; 2403 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC); 2404 mvpp2_prs_hw_write(priv, pe); 2405 2406 kfree(pe); 2407 2408 return 0; 2409 } 2410 2411 static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da) 2412 { 2413 int err; 2414 2415 /* Remove old parser entry */ 2416 err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr, 2417 false); 2418 if (err) 2419 return err; 2420 2421 /* Add new parser entry */ 2422 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true); 2423 if (err) 2424 return err; 2425 2426 /* Set addr in the device */ 2427 memcpy(port->dev_addr, da, ETH_ALEN); 2428 2429 return 0; 2430 } 2431 2432 /* Set prs flow for the port */ 2433 static int mvpp2_prs_def_flow(struct mvpp2_port *port) 2434 { 2435 struct mvpp2_prs_entry *pe; 2436 int tid; 2437 2438 pe = mvpp2_prs_flow_find(port->priv, port->id); 2439 2440 /* Such entry not exist */ 2441 if (!pe) { 2442 /* Go through the all entires from last to first */ 2443 tid = mvpp2_prs_tcam_first_free(port->priv, 2444 MVPP2_PE_LAST_FREE_TID, 2445 MVPP2_PE_FIRST_FREE_TID); 2446 if (tid < 0) 2447 return tid; 2448 2449 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2450 if (!pe) 2451 return -ENOMEM; 2452 2453 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); 2454 pe->index = tid; 2455 2456 /* Set flow ID*/ 2457 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK); 2458 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); 2459 2460 /* Update shadow table */ 2461 
mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS); 2462 } 2463 2464 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id)); 2465 mvpp2_prs_hw_write(port->priv, pe); 2466 kfree(pe); 2467 2468 return 0; 2469 } 2470 2471 /* Classifier configuration routines */ 2472 2473 /* Update classification flow table registers */ 2474 static void mvpp2_cls_flow_write(struct mvpp2 *priv, 2475 struct mvpp2_cls_flow_entry *fe) 2476 { 2477 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); 2478 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); 2479 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); 2480 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); 2481 } 2482 2483 /* Update classification lookup table register */ 2484 static void mvpp2_cls_lookup_write(struct mvpp2 *priv, 2485 struct mvpp2_cls_lookup_entry *le) 2486 { 2487 u32 val; 2488 2489 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid; 2490 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); 2491 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); 2492 } 2493 2494 /* Classifier default initialization */ 2495 static void mvpp2_cls_init(struct mvpp2 *priv) 2496 { 2497 struct mvpp2_cls_lookup_entry le; 2498 struct mvpp2_cls_flow_entry fe; 2499 int index; 2500 2501 /* Enable classifier */ 2502 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); 2503 2504 /* Clear classifier flow table */ 2505 memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS); 2506 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { 2507 fe.index = index; 2508 mvpp2_cls_flow_write(priv, &fe); 2509 } 2510 2511 /* Clear classifier lookup table */ 2512 le.data = 0; 2513 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) { 2514 le.lkpid = index; 2515 le.way = 0; 2516 mvpp2_cls_lookup_write(priv, &le); 2517 2518 le.way = 1; 2519 mvpp2_cls_lookup_write(priv, &le); 2520 } 2521 } 2522 2523 static void mvpp2_cls_port_config(struct mvpp2_port *port) 2524 { 2525 struct mvpp2_cls_lookup_entry le; 2526 
u32 val; 2527 2528 /* Set way for the port */ 2529 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG); 2530 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id); 2531 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val); 2532 2533 /* Pick the entry to be accessed in lookup ID decoding table 2534 * according to the way and lkpid. 2535 */ 2536 le.lkpid = port->id; 2537 le.way = 0; 2538 le.data = 0; 2539 2540 /* Set initial CPU queue for receiving packets */ 2541 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK; 2542 le.data |= port->first_rxq; 2543 2544 /* Disable classification engines */ 2545 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; 2546 2547 /* Update lookup ID table entry */ 2548 mvpp2_cls_lookup_write(port->priv, &le); 2549 } 2550 2551 /* Set CPU queue number for oversize packets */ 2552 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) 2553 { 2554 u32 val; 2555 2556 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id), 2557 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK); 2558 2559 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id), 2560 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); 2561 2562 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); 2563 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); 2564 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); 2565 } 2566 2567 /* Buffer Manager configuration routines */ 2568 2569 /* Create pool */ 2570 static int mvpp2_bm_pool_create(struct udevice *dev, 2571 struct mvpp2 *priv, 2572 struct mvpp2_bm_pool *bm_pool, int size) 2573 { 2574 u32 val; 2575 2576 /* Number of buffer pointers must be a multiple of 16, as per 2577 * hardware constraints 2578 */ 2579 if (!IS_ALIGNED(size, 16)) 2580 return -EINVAL; 2581 2582 bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id]; 2583 bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id]; 2584 if (!bm_pool->virt_addr) 2585 return -ENOMEM; 2586 2587 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr, 2588 MVPP2_BM_POOL_PTR_ALIGN)) { 2589 
dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", 2590 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN); 2591 return -ENOMEM; 2592 } 2593 2594 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), 2595 lower_32_bits(bm_pool->dma_addr)); 2596 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size); 2597 2598 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); 2599 val |= MVPP2_BM_START_MASK; 2600 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); 2601 2602 bm_pool->type = MVPP2_BM_FREE; 2603 bm_pool->size = size; 2604 bm_pool->pkt_size = 0; 2605 bm_pool->buf_num = 0; 2606 2607 return 0; 2608 } 2609 2610 /* Set pool buffer size */ 2611 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, 2612 struct mvpp2_bm_pool *bm_pool, 2613 int buf_size) 2614 { 2615 u32 val; 2616 2617 bm_pool->buf_size = buf_size; 2618 2619 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET); 2620 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val); 2621 } 2622 2623 /* Free all buffers from the pool */ 2624 static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv, 2625 struct mvpp2_bm_pool *bm_pool) 2626 { 2627 int i; 2628 2629 for (i = 0; i < bm_pool->buf_num; i++) { 2630 /* Allocate buffer back from the buffer manager */ 2631 mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); 2632 } 2633 2634 bm_pool->buf_num = 0; 2635 } 2636 2637 /* Cleanup pool */ 2638 static int mvpp2_bm_pool_destroy(struct udevice *dev, 2639 struct mvpp2 *priv, 2640 struct mvpp2_bm_pool *bm_pool) 2641 { 2642 u32 val; 2643 2644 mvpp2_bm_bufs_free(dev, priv, bm_pool); 2645 if (bm_pool->buf_num) { 2646 dev_err(dev, "cannot free all buffers in pool %d\n", bm_pool->id); 2647 return 0; 2648 } 2649 2650 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); 2651 val |= MVPP2_BM_STOP_MASK; 2652 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); 2653 2654 return 0; 2655 } 2656 2657 static int mvpp2_bm_pools_init(struct udevice *dev, 2658 struct mvpp2 *priv) 
2659 { 2660 int i, err, size; 2661 struct mvpp2_bm_pool *bm_pool; 2662 2663 /* Create all pools with maximum size */ 2664 size = MVPP2_BM_POOL_SIZE_MAX; 2665 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { 2666 bm_pool = &priv->bm_pools[i]; 2667 bm_pool->id = i; 2668 err = mvpp2_bm_pool_create(dev, priv, bm_pool, size); 2669 if (err) 2670 goto err_unroll_pools; 2671 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0); 2672 } 2673 return 0; 2674 2675 err_unroll_pools: 2676 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size); 2677 for (i = i - 1; i >= 0; i--) 2678 mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]); 2679 return err; 2680 } 2681 2682 static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv) 2683 { 2684 int i, err; 2685 2686 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { 2687 /* Mask BM all interrupts */ 2688 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0); 2689 /* Clear BM cause register */ 2690 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0); 2691 } 2692 2693 /* Allocate and initialize BM pools */ 2694 priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM, 2695 sizeof(struct mvpp2_bm_pool), GFP_KERNEL); 2696 if (!priv->bm_pools) 2697 return -ENOMEM; 2698 2699 err = mvpp2_bm_pools_init(dev, priv); 2700 if (err < 0) 2701 return err; 2702 return 0; 2703 } 2704 2705 /* Attach long pool to rxq */ 2706 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, 2707 int lrxq, int long_pool) 2708 { 2709 u32 val, mask; 2710 int prxq; 2711 2712 /* Get queue physical ID */ 2713 prxq = port->rxqs[lrxq]->id; 2714 2715 if (port->priv->hw_version == MVPP21) 2716 mask = MVPP21_RXQ_POOL_LONG_MASK; 2717 else 2718 mask = MVPP22_RXQ_POOL_LONG_MASK; 2719 2720 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 2721 val &= ~mask; 2722 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask; 2723 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 2724 } 2725 2726 /* Set pool number in a BM cookie */ 2727 static inline u32 
mvpp2_bm_cookie_pool_set(u32 cookie, int pool) 2728 { 2729 u32 bm; 2730 2731 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS); 2732 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS); 2733 2734 return bm; 2735 } 2736 2737 /* Get pool number from a BM cookie */ 2738 static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie) 2739 { 2740 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF; 2741 } 2742 2743 /* Release buffer to BM */ 2744 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, 2745 dma_addr_t buf_dma_addr, 2746 unsigned long buf_phys_addr) 2747 { 2748 if (port->priv->hw_version == MVPP22) { 2749 u32 val = 0; 2750 2751 if (sizeof(dma_addr_t) == 8) 2752 val |= upper_32_bits(buf_dma_addr) & 2753 MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK; 2754 2755 if (sizeof(phys_addr_t) == 8) 2756 val |= (upper_32_bits(buf_phys_addr) 2757 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) & 2758 MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK; 2759 2760 mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val); 2761 } 2762 2763 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply 2764 * returned in the "cookie" field of the RX 2765 * descriptor. 
Instead of storing the virtual address, we 2766 * store the physical address 2767 */ 2768 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); 2769 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); 2770 } 2771 2772 /* Refill BM pool */ 2773 static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, 2774 dma_addr_t dma_addr, 2775 phys_addr_t phys_addr) 2776 { 2777 int pool = mvpp2_bm_cookie_pool_get(bm); 2778 2779 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); 2780 } 2781 2782 /* Allocate buffers for the pool */ 2783 static int mvpp2_bm_bufs_add(struct mvpp2_port *port, 2784 struct mvpp2_bm_pool *bm_pool, int buf_num) 2785 { 2786 int i; 2787 2788 if (buf_num < 0 || 2789 (buf_num + bm_pool->buf_num > bm_pool->size)) { 2790 netdev_err(port->dev, 2791 "cannot allocate %d buffers for pool %d\n", 2792 buf_num, bm_pool->id); 2793 return 0; 2794 } 2795 2796 for (i = 0; i < buf_num; i++) { 2797 mvpp2_bm_pool_put(port, bm_pool->id, 2798 (dma_addr_t)buffer_loc.rx_buffer[i], 2799 (unsigned long)buffer_loc.rx_buffer[i]); 2800 2801 } 2802 2803 /* Update BM driver with number of buffers added to pool */ 2804 bm_pool->buf_num += i; 2805 2806 return i; 2807 } 2808 2809 /* Notify the driver that BM pool is being used as specific type and return the 2810 * pool pointer on success 2811 */ 2812 static struct mvpp2_bm_pool * 2813 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, 2814 int pkt_size) 2815 { 2816 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; 2817 int num; 2818 2819 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) { 2820 netdev_err(port->dev, "mixing pool types is forbidden\n"); 2821 return NULL; 2822 } 2823 2824 if (new_pool->type == MVPP2_BM_FREE) 2825 new_pool->type = type; 2826 2827 /* Allocate buffers in case BM pool is used as long pool, but packet 2828 * size doesn't match MTU or BM pool hasn't being used yet 2829 */ 2830 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > 
new_pool->pkt_size)) || 2831 (new_pool->pkt_size == 0)) { 2832 int pkts_num; 2833 2834 /* Set default buffer number or free all the buffers in case 2835 * the pool is not empty 2836 */ 2837 pkts_num = new_pool->buf_num; 2838 if (pkts_num == 0) 2839 pkts_num = type == MVPP2_BM_SWF_LONG ? 2840 MVPP2_BM_LONG_BUF_NUM : 2841 MVPP2_BM_SHORT_BUF_NUM; 2842 else 2843 mvpp2_bm_bufs_free(NULL, 2844 port->priv, new_pool); 2845 2846 new_pool->pkt_size = pkt_size; 2847 2848 /* Allocate buffers for this pool */ 2849 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); 2850 if (num != pkts_num) { 2851 dev_err(dev, "pool %d: %d of %d allocated\n", 2852 new_pool->id, num, pkts_num); 2853 return NULL; 2854 } 2855 } 2856 2857 mvpp2_bm_pool_bufsize_set(port->priv, new_pool, 2858 MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); 2859 2860 return new_pool; 2861 } 2862 2863 /* Initialize pools for swf */ 2864 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) 2865 { 2866 int rxq; 2867 2868 if (!port->pool_long) { 2869 port->pool_long = 2870 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id), 2871 MVPP2_BM_SWF_LONG, 2872 port->pkt_size); 2873 if (!port->pool_long) 2874 return -ENOMEM; 2875 2876 port->pool_long->port_map |= (1 << port->id); 2877 2878 for (rxq = 0; rxq < rxq_number; rxq++) 2879 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); 2880 } 2881 2882 return 0; 2883 } 2884 2885 /* Port configuration routines */ 2886 2887 static void mvpp2_port_mii_set(struct mvpp2_port *port) 2888 { 2889 u32 val; 2890 2891 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); 2892 2893 switch (port->phy_interface) { 2894 case PHY_INTERFACE_MODE_SGMII: 2895 val |= MVPP2_GMAC_INBAND_AN_MASK; 2896 break; 2897 case PHY_INTERFACE_MODE_RGMII: 2898 case PHY_INTERFACE_MODE_RGMII_ID: 2899 val |= MVPP2_GMAC_PORT_RGMII_MASK; 2900 default: 2901 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; 2902 } 2903 2904 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 2905 } 2906 2907 static void mvpp2_port_fc_adv_enable(struct 
mvpp2_port *port)
{
	u32 val;

	/* Advertise flow-control capability in autoneg */
	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

/* Enable the GMAC port and its MIB counters */
static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Disable the GMAC port */
static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	/* GMII loopback at gigabit speed, plain MII loopback otherwise */
	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	/* PCS loopback only applies to the SGMII (PCS-based) interface */
	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* De-assert the port reset and wait until the MAC reports that it has
 * left the reset state.
 */
static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	      ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	/* NOTE(review): unbounded busy-wait on hardware clearing the
	 * reset bit; there is no timeout here.
	 */
	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	/* The field holds the size in 2-byte units, excluding the
	 * Marvell Header.
	 */
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* PPv2.2 GoP/GMAC config */

/* Set the MAC to reset or exit from reset */
static int gop_gmac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (reset)
		val |= MVPP2_GMAC_PORT_RESET_MASK;
	else
		val &= ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

/*
 * gop_gpcs_mode_cfg
 *
 * Configure port to working with Gig PCS or don't.
 */
static int gop_gpcs_mode_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_PCS_ENABLE_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	/* enable / disable PCS on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

/* Enable or disable bypass of the 125 MHz clock on this port */
static int gop_bypass_clk_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	else
		val &= ~MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	/* enable / disable the 125MHz clock bypass on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

/* Configure the GMAC for 2.5G SGMII (1000Base-X port type) operation */
static void gop_gmac_sgmii2_5_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII2_5_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val |= MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/*
	 * Configure GIG MAC to 1000Base-X mode connected to a fiber
	 * transceiver
	 */
	val |= MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN 0x9268 */
	val = MVPP2_GMAC_EN_PCS_AN |
		MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_CONFIG_MII_SPEED |
		MVPP2_GMAC_CONFIG_GMII_SPEED |
		MVPP2_GMAC_FC_ADV_EN |
		MVPP2_GMAC_CONFIG_FULL_DUPLEX |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

/* Configure the GMAC for 1G SGMII operation with in-band autoneg */
static void gop_gmac_sgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/* configure GIG MAC to SGMII mode */
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN */
	val = MVPP2_GMAC_EN_PCS_AN |
		MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_AN_SPEED_EN |
		MVPP2_GMAC_EN_FC_AN |
		MVPP2_GMAC_AN_DUPLEX_EN |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

/* Configure the GMAC for RGMII operation (external GMII pins) */
static void gop_gmac_rgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_RGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	val |= MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/* configure GIG MAC to SGMII mode */
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN 0xb8e8 */
	val = MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_AN_SPEED_EN |
		MVPP2_GMAC_EN_FC_AN |
		MVPP2_GMAC_AN_DUPLEX_EN |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

/* Set the internal mux's to the required MAC
in the GOP */ 3161 static int gop_gmac_mode_cfg(struct mvpp2_port *port) 3162 { 3163 u32 val; 3164 3165 /* Set TX FIFO thresholds */ 3166 switch (port->phy_interface) { 3167 case PHY_INTERFACE_MODE_SGMII: 3168 if (port->phy_speed == 2500) 3169 gop_gmac_sgmii2_5_cfg(port); 3170 else 3171 gop_gmac_sgmii_cfg(port); 3172 break; 3173 3174 case PHY_INTERFACE_MODE_RGMII: 3175 case PHY_INTERFACE_MODE_RGMII_ID: 3176 gop_gmac_rgmii_cfg(port); 3177 break; 3178 3179 default: 3180 return -1; 3181 } 3182 3183 /* Jumbo frame support - 0x1400*2= 0x2800 bytes */ 3184 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 3185 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; 3186 val |= 0x1400 << MVPP2_GMAC_MAX_RX_SIZE_OFFS; 3187 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); 3188 3189 /* PeriodicXonEn disable */ 3190 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG); 3191 val &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK; 3192 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); 3193 3194 return 0; 3195 } 3196 3197 static void gop_xlg_2_gig_mac_cfg(struct mvpp2_port *port) 3198 { 3199 u32 val; 3200 3201 /* relevant only for MAC0 (XLG0 and GMAC0) */ 3202 if (port->gop_id > 0) 3203 return; 3204 3205 /* configure 1Gig MAC mode */ 3206 val = readl(port->base + MVPP22_XLG_CTRL3_REG); 3207 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; 3208 val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC; 3209 writel(val, port->base + MVPP22_XLG_CTRL3_REG); 3210 } 3211 3212 static int gop_gpcs_reset(struct mvpp2_port *port, int reset) 3213 { 3214 u32 val; 3215 3216 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); 3217 if (reset) 3218 val &= ~MVPP2_GMAC_SGMII_MODE_MASK; 3219 else 3220 val |= MVPP2_GMAC_SGMII_MODE_MASK; 3221 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 3222 3223 return 0; 3224 } 3225 3226 /* Set the internal mux's to the required PCS in the PI */ 3227 static int gop_xpcs_mode(struct mvpp2_port *port, int num_of_lanes) 3228 { 3229 u32 val; 3230 int lane; 3231 3232 switch (num_of_lanes) { 3233 case 1: 3234 lane = 0; 3235 
break; 3236 case 2: 3237 lane = 1; 3238 break; 3239 case 4: 3240 lane = 2; 3241 break; 3242 default: 3243 return -1; 3244 } 3245 3246 /* configure XG MAC mode */ 3247 val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG); 3248 val &= ~MVPP22_XPCS_PCSMODE_MASK; 3249 val &= ~MVPP22_XPCS_LANEACTIVE_MASK; 3250 val |= (2 * lane) << MVPP22_XPCS_LANEACTIVE_OFFS; 3251 writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG); 3252 3253 return 0; 3254 } 3255 3256 static int gop_mpcs_mode(struct mvpp2_port *port) 3257 { 3258 u32 val; 3259 3260 /* configure PCS40G COMMON CONTROL */ 3261 val = readl(port->priv->mpcs_base + PCS40G_COMMON_CONTROL); 3262 val &= ~FORWARD_ERROR_CORRECTION_MASK; 3263 writel(val, port->priv->mpcs_base + PCS40G_COMMON_CONTROL); 3264 3265 /* configure PCS CLOCK RESET */ 3266 val = readl(port->priv->mpcs_base + PCS_CLOCK_RESET); 3267 val &= ~CLK_DIVISION_RATIO_MASK; 3268 val |= 1 << CLK_DIVISION_RATIO_OFFS; 3269 writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET); 3270 3271 val &= ~CLK_DIV_PHASE_SET_MASK; 3272 val |= MAC_CLK_RESET_MASK; 3273 val |= RX_SD_CLK_RESET_MASK; 3274 val |= TX_SD_CLK_RESET_MASK; 3275 writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET); 3276 3277 return 0; 3278 } 3279 3280 /* Set the internal mux's to the required MAC in the GOP */ 3281 static int gop_xlg_mac_mode_cfg(struct mvpp2_port *port, int num_of_act_lanes) 3282 { 3283 u32 val; 3284 3285 /* configure 10G MAC mode */ 3286 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 3287 val |= MVPP22_XLG_RX_FC_EN; 3288 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 3289 3290 val = readl(port->base + MVPP22_XLG_CTRL3_REG); 3291 val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK; 3292 val |= MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC; 3293 writel(val, port->base + MVPP22_XLG_CTRL3_REG); 3294 3295 /* read - modify - write */ 3296 val = readl(port->base + MVPP22_XLG_CTRL4_REG); 3297 val &= ~MVPP22_XLG_MODE_DMA_1G; 3298 val |= MVPP22_XLG_FORWARD_PFC_EN; 3299 val |= 
MVPP22_XLG_FORWARD_802_3X_FC_EN; 3300 val &= ~MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK; 3301 writel(val, port->base + MVPP22_XLG_CTRL4_REG); 3302 3303 /* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */ 3304 val = readl(port->base + MVPP22_XLG_CTRL1_REG); 3305 val &= ~MVPP22_XLG_MAX_RX_SIZE_MASK; 3306 val |= 0x1400 << MVPP22_XLG_MAX_RX_SIZE_OFFS; 3307 writel(val, port->base + MVPP22_XLG_CTRL1_REG); 3308 3309 /* unmask link change interrupt */ 3310 val = readl(port->base + MVPP22_XLG_INTERRUPT_MASK_REG); 3311 val |= MVPP22_XLG_INTERRUPT_LINK_CHANGE; 3312 val |= 1; /* unmask summary bit */ 3313 writel(val, port->base + MVPP22_XLG_INTERRUPT_MASK_REG); 3314 3315 return 0; 3316 } 3317 3318 /* Set PCS to reset or exit from reset */ 3319 static int gop_xpcs_reset(struct mvpp2_port *port, int reset) 3320 { 3321 u32 val; 3322 3323 /* read - modify - write */ 3324 val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG); 3325 if (reset) 3326 val &= ~MVPP22_XPCS_PCSRESET; 3327 else 3328 val |= MVPP22_XPCS_PCSRESET; 3329 writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG); 3330 3331 return 0; 3332 } 3333 3334 /* Set the MAC to reset or exit from reset */ 3335 static int gop_xlg_mac_reset(struct mvpp2_port *port, int reset) 3336 { 3337 u32 val; 3338 3339 /* read - modify - write */ 3340 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 3341 if (reset) 3342 val &= ~MVPP22_XLG_MAC_RESETN; 3343 else 3344 val |= MVPP22_XLG_MAC_RESETN; 3345 writel(val, port->base + MVPP22_XLG_CTRL0_REG); 3346 3347 return 0; 3348 } 3349 3350 /* 3351 * gop_port_init 3352 * 3353 * Init physical port. Configures the port mode and all it's elements 3354 * accordingly. 3355 * Does not verify that the selected mode/port number is valid at the 3356 * core level. 
3357 */ 3358 static int gop_port_init(struct mvpp2_port *port) 3359 { 3360 int mac_num = port->gop_id; 3361 int num_of_act_lanes; 3362 3363 if (mac_num >= MVPP22_GOP_MAC_NUM) { 3364 netdev_err(NULL, "%s: illegal port number %d", __func__, 3365 mac_num); 3366 return -1; 3367 } 3368 3369 switch (port->phy_interface) { 3370 case PHY_INTERFACE_MODE_RGMII: 3371 case PHY_INTERFACE_MODE_RGMII_ID: 3372 gop_gmac_reset(port, 1); 3373 3374 /* configure PCS */ 3375 gop_gpcs_mode_cfg(port, 0); 3376 gop_bypass_clk_cfg(port, 1); 3377 3378 /* configure MAC */ 3379 gop_gmac_mode_cfg(port); 3380 /* pcs unreset */ 3381 gop_gpcs_reset(port, 0); 3382 3383 /* mac unreset */ 3384 gop_gmac_reset(port, 0); 3385 break; 3386 3387 case PHY_INTERFACE_MODE_SGMII: 3388 /* configure PCS */ 3389 gop_gpcs_mode_cfg(port, 1); 3390 3391 /* configure MAC */ 3392 gop_gmac_mode_cfg(port); 3393 /* select proper Mac mode */ 3394 gop_xlg_2_gig_mac_cfg(port); 3395 3396 /* pcs unreset */ 3397 gop_gpcs_reset(port, 0); 3398 /* mac unreset */ 3399 gop_gmac_reset(port, 0); 3400 break; 3401 3402 case PHY_INTERFACE_MODE_SFI: 3403 num_of_act_lanes = 2; 3404 mac_num = 0; 3405 /* configure PCS */ 3406 gop_xpcs_mode(port, num_of_act_lanes); 3407 gop_mpcs_mode(port); 3408 /* configure MAC */ 3409 gop_xlg_mac_mode_cfg(port, num_of_act_lanes); 3410 3411 /* pcs unreset */ 3412 gop_xpcs_reset(port, 0); 3413 3414 /* mac unreset */ 3415 gop_xlg_mac_reset(port, 0); 3416 break; 3417 3418 default: 3419 netdev_err(NULL, "%s: Requested port mode (%d) not supported\n", 3420 __func__, port->phy_interface); 3421 return -1; 3422 } 3423 3424 return 0; 3425 } 3426 3427 static void gop_xlg_mac_port_enable(struct mvpp2_port *port, int enable) 3428 { 3429 u32 val; 3430 3431 val = readl(port->base + MVPP22_XLG_CTRL0_REG); 3432 if (enable) { 3433 /* Enable port and MIB counters update */ 3434 val |= MVPP22_XLG_PORT_EN; 3435 val &= ~MVPP22_XLG_MIBCNT_DIS; 3436 } else { 3437 /* Disable port */ 3438 val &= ~MVPP22_XLG_PORT_EN; 3439 } 3440 
writel(val, port->base + MVPP22_XLG_CTRL0_REG); 3441 } 3442 3443 static void gop_port_enable(struct mvpp2_port *port, int enable) 3444 { 3445 switch (port->phy_interface) { 3446 case PHY_INTERFACE_MODE_RGMII: 3447 case PHY_INTERFACE_MODE_RGMII_ID: 3448 case PHY_INTERFACE_MODE_SGMII: 3449 if (enable) 3450 mvpp2_port_enable(port); 3451 else 3452 mvpp2_port_disable(port); 3453 break; 3454 3455 case PHY_INTERFACE_MODE_SFI: 3456 gop_xlg_mac_port_enable(port, enable); 3457 3458 break; 3459 default: 3460 netdev_err(NULL, "%s: Wrong port mode (%d)\n", __func__, 3461 port->phy_interface); 3462 return; 3463 } 3464 } 3465 3466 /* RFU1 functions */ 3467 static inline u32 gop_rfu1_read(struct mvpp2 *priv, u32 offset) 3468 { 3469 return readl(priv->rfu1_base + offset); 3470 } 3471 3472 static inline void gop_rfu1_write(struct mvpp2 *priv, u32 offset, u32 data) 3473 { 3474 writel(data, priv->rfu1_base + offset); 3475 } 3476 3477 static u32 mvpp2_netc_cfg_create(int gop_id, phy_interface_t phy_type) 3478 { 3479 u32 val = 0; 3480 3481 if (gop_id == 2) { 3482 if (phy_type == PHY_INTERFACE_MODE_SGMII) 3483 val |= MV_NETC_GE_MAC2_SGMII; 3484 } 3485 3486 if (gop_id == 3) { 3487 if (phy_type == PHY_INTERFACE_MODE_SGMII) 3488 val |= MV_NETC_GE_MAC3_SGMII; 3489 else if (phy_type == PHY_INTERFACE_MODE_RGMII || 3490 phy_type == PHY_INTERFACE_MODE_RGMII_ID) 3491 val |= MV_NETC_GE_MAC3_RGMII; 3492 } 3493 3494 return val; 3495 } 3496 3497 static void gop_netc_active_port(struct mvpp2 *priv, int gop_id, u32 val) 3498 { 3499 u32 reg; 3500 3501 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG); 3502 reg &= ~(NETC_PORTS_ACTIVE_MASK(gop_id)); 3503 3504 val <<= NETC_PORTS_ACTIVE_OFFSET(gop_id); 3505 val &= NETC_PORTS_ACTIVE_MASK(gop_id); 3506 3507 reg |= val; 3508 3509 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg); 3510 } 3511 3512 static void gop_netc_mii_mode(struct mvpp2 *priv, int gop_id, u32 val) 3513 { 3514 u32 reg; 3515 3516 reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG); 3517 
reg &= ~NETC_GBE_PORT1_MII_MODE_MASK; 3518 3519 val <<= NETC_GBE_PORT1_MII_MODE_OFFS; 3520 val &= NETC_GBE_PORT1_MII_MODE_MASK; 3521 3522 reg |= val; 3523 3524 gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg); 3525 } 3526 3527 static void gop_netc_gop_reset(struct mvpp2 *priv, u32 val) 3528 { 3529 u32 reg; 3530 3531 reg = gop_rfu1_read(priv, GOP_SOFT_RESET_1_REG); 3532 reg &= ~NETC_GOP_SOFT_RESET_MASK; 3533 3534 val <<= NETC_GOP_SOFT_RESET_OFFS; 3535 val &= NETC_GOP_SOFT_RESET_MASK; 3536 3537 reg |= val; 3538 3539 gop_rfu1_write(priv, GOP_SOFT_RESET_1_REG, reg); 3540 } 3541 3542 static void gop_netc_gop_clock_logic_set(struct mvpp2 *priv, u32 val) 3543 { 3544 u32 reg; 3545 3546 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG); 3547 reg &= ~NETC_CLK_DIV_PHASE_MASK; 3548 3549 val <<= NETC_CLK_DIV_PHASE_OFFS; 3550 val &= NETC_CLK_DIV_PHASE_MASK; 3551 3552 reg |= val; 3553 3554 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg); 3555 } 3556 3557 static void gop_netc_port_rf_reset(struct mvpp2 *priv, int gop_id, u32 val) 3558 { 3559 u32 reg; 3560 3561 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG); 3562 reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(gop_id)); 3563 3564 val <<= NETC_PORT_GIG_RF_RESET_OFFS(gop_id); 3565 val &= NETC_PORT_GIG_RF_RESET_MASK(gop_id); 3566 3567 reg |= val; 3568 3569 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg); 3570 } 3571 3572 static void gop_netc_gbe_sgmii_mode_select(struct mvpp2 *priv, int gop_id, 3573 u32 val) 3574 { 3575 u32 reg, mask, offset; 3576 3577 if (gop_id == 2) { 3578 mask = NETC_GBE_PORT0_SGMII_MODE_MASK; 3579 offset = NETC_GBE_PORT0_SGMII_MODE_OFFS; 3580 } else { 3581 mask = NETC_GBE_PORT1_SGMII_MODE_MASK; 3582 offset = NETC_GBE_PORT1_SGMII_MODE_OFFS; 3583 } 3584 reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG); 3585 reg &= ~mask; 3586 3587 val <<= offset; 3588 val &= mask; 3589 3590 reg |= val; 3591 3592 gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg); 3593 } 3594 3595 static void 
gop_netc_bus_width_select(struct mvpp2 *priv, u32 val) 3596 { 3597 u32 reg; 3598 3599 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG); 3600 reg &= ~NETC_BUS_WIDTH_SELECT_MASK; 3601 3602 val <<= NETC_BUS_WIDTH_SELECT_OFFS; 3603 val &= NETC_BUS_WIDTH_SELECT_MASK; 3604 3605 reg |= val; 3606 3607 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg); 3608 } 3609 3610 static void gop_netc_sample_stages_timing(struct mvpp2 *priv, u32 val) 3611 { 3612 u32 reg; 3613 3614 reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG); 3615 reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK; 3616 3617 val <<= NETC_GIG_RX_DATA_SAMPLE_OFFS; 3618 val &= NETC_GIG_RX_DATA_SAMPLE_MASK; 3619 3620 reg |= val; 3621 3622 gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg); 3623 } 3624 3625 static void gop_netc_mac_to_xgmii(struct mvpp2 *priv, int gop_id, 3626 enum mv_netc_phase phase) 3627 { 3628 switch (phase) { 3629 case MV_NETC_FIRST_PHASE: 3630 /* Set Bus Width to HB mode = 1 */ 3631 gop_netc_bus_width_select(priv, 1); 3632 /* Select RGMII mode */ 3633 gop_netc_gbe_sgmii_mode_select(priv, gop_id, MV_NETC_GBE_XMII); 3634 break; 3635 3636 case MV_NETC_SECOND_PHASE: 3637 /* De-assert the relevant port HB reset */ 3638 gop_netc_port_rf_reset(priv, gop_id, 1); 3639 break; 3640 } 3641 } 3642 3643 static void gop_netc_mac_to_sgmii(struct mvpp2 *priv, int gop_id, 3644 enum mv_netc_phase phase) 3645 { 3646 switch (phase) { 3647 case MV_NETC_FIRST_PHASE: 3648 /* Set Bus Width to HB mode = 1 */ 3649 gop_netc_bus_width_select(priv, 1); 3650 /* Select SGMII mode */ 3651 if (gop_id >= 1) { 3652 gop_netc_gbe_sgmii_mode_select(priv, gop_id, 3653 MV_NETC_GBE_SGMII); 3654 } 3655 3656 /* Configure the sample stages */ 3657 gop_netc_sample_stages_timing(priv, 0); 3658 /* Configure the ComPhy Selector */ 3659 /* gop_netc_com_phy_selector_config(netComplex); */ 3660 break; 3661 3662 case MV_NETC_SECOND_PHASE: 3663 /* De-assert the relevant port HB reset */ 3664 gop_netc_port_rf_reset(priv, gop_id, 1); 3665 break; 
3666 } 3667 } 3668 3669 static int gop_netc_init(struct mvpp2 *priv, enum mv_netc_phase phase) 3670 { 3671 u32 c = priv->netc_config; 3672 3673 if (c & MV_NETC_GE_MAC2_SGMII) 3674 gop_netc_mac_to_sgmii(priv, 2, phase); 3675 else 3676 gop_netc_mac_to_xgmii(priv, 2, phase); 3677 3678 if (c & MV_NETC_GE_MAC3_SGMII) { 3679 gop_netc_mac_to_sgmii(priv, 3, phase); 3680 } else { 3681 gop_netc_mac_to_xgmii(priv, 3, phase); 3682 if (c & MV_NETC_GE_MAC3_RGMII) 3683 gop_netc_mii_mode(priv, 3, MV_NETC_GBE_RGMII); 3684 else 3685 gop_netc_mii_mode(priv, 3, MV_NETC_GBE_MII); 3686 } 3687 3688 /* Activate gop ports 0, 2, 3 */ 3689 gop_netc_active_port(priv, 0, 1); 3690 gop_netc_active_port(priv, 2, 1); 3691 gop_netc_active_port(priv, 3, 1); 3692 3693 if (phase == MV_NETC_SECOND_PHASE) { 3694 /* Enable the GOP internal clock logic */ 3695 gop_netc_gop_clock_logic_set(priv, 1); 3696 /* De-assert GOP unit reset */ 3697 gop_netc_gop_reset(priv, 1); 3698 } 3699 3700 return 0; 3701 } 3702 3703 /* Set defaults to the MVPP2 port */ 3704 static void mvpp2_defaults_set(struct mvpp2_port *port) 3705 { 3706 int tx_port_num, val, queue, ptxq, lrxq; 3707 3708 if (port->priv->hw_version == MVPP21) { 3709 /* Configure port to loopback if needed */ 3710 if (port->flags & MVPP2_F_LOOPBACK) 3711 mvpp2_port_loopback_set(port); 3712 3713 /* Update TX FIFO MIN Threshold */ 3714 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 3715 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; 3716 /* Min. 
TX threshold must be less than minimal packet length */ 3717 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); 3718 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 3719 } 3720 3721 /* Disable Legacy WRR, Disable EJP, Release from reset */ 3722 tx_port_num = mvpp2_egress_port(port); 3723 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, 3724 tx_port_num); 3725 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); 3726 3727 /* Close bandwidth for all queues */ 3728 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { 3729 ptxq = mvpp2_txq_phys(port->id, queue); 3730 mvpp2_write(port->priv, 3731 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0); 3732 } 3733 3734 /* Set refill period to 1 usec, refill tokens 3735 * and bucket size to maximum 3736 */ 3737 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8); 3738 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); 3739 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; 3740 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); 3741 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; 3742 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); 3743 val = MVPP2_TXP_TOKEN_SIZE_MAX; 3744 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); 3745 3746 /* Set MaximumLowLatencyPacketSize value to 256 */ 3747 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), 3748 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | 3749 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); 3750 3751 /* Enable Rx cache snoop */ 3752 for (lrxq = 0; lrxq < rxq_number; lrxq++) { 3753 queue = port->rxqs[lrxq]->id; 3754 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 3755 val |= MVPP2_SNOOP_PKT_SIZE_MASK | 3756 MVPP2_SNOOP_BUF_HDR_MASK; 3757 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 3758 } 3759 } 3760 3761 /* Enable/disable receiving packets */ 3762 static void mvpp2_ingress_enable(struct mvpp2_port *port) 3763 { 3764 u32 val; 3765 int lrxq, queue; 3766 3767 for (lrxq = 0; lrxq < rxq_number; lrxq++) { 3768 queue = port->rxqs[lrxq]->id; 3769 val = 
mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 3770 val &= ~MVPP2_RXQ_DISABLE_MASK; 3771 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 3772 } 3773 } 3774 3775 static void mvpp2_ingress_disable(struct mvpp2_port *port) 3776 { 3777 u32 val; 3778 int lrxq, queue; 3779 3780 for (lrxq = 0; lrxq < rxq_number; lrxq++) { 3781 queue = port->rxqs[lrxq]->id; 3782 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 3783 val |= MVPP2_RXQ_DISABLE_MASK; 3784 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 3785 } 3786 } 3787 3788 /* Enable transmit via physical egress queue 3789 * - HW starts take descriptors from DRAM 3790 */ 3791 static void mvpp2_egress_enable(struct mvpp2_port *port) 3792 { 3793 u32 qmap; 3794 int queue; 3795 int tx_port_num = mvpp2_egress_port(port); 3796 3797 /* Enable all initialized TXs. */ 3798 qmap = 0; 3799 for (queue = 0; queue < txq_number; queue++) { 3800 struct mvpp2_tx_queue *txq = port->txqs[queue]; 3801 3802 if (txq->descs != NULL) 3803 qmap |= (1 << queue); 3804 } 3805 3806 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 3807 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); 3808 } 3809 3810 /* Disable transmit via physical egress queue 3811 * - HW doesn't take descriptors from DRAM 3812 */ 3813 static void mvpp2_egress_disable(struct mvpp2_port *port) 3814 { 3815 u32 reg_data; 3816 int delay; 3817 int tx_port_num = mvpp2_egress_port(port); 3818 3819 /* Issue stop command for active channels only */ 3820 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 3821 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & 3822 MVPP2_TXP_SCHED_ENQ_MASK; 3823 if (reg_data != 0) 3824 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, 3825 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); 3826 3827 /* Wait for all Tx activity to terminate. 
*/ 3828 delay = 0; 3829 do { 3830 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { 3831 netdev_warn(port->dev, 3832 "Tx stop timed out, status=0x%08x\n", 3833 reg_data); 3834 break; 3835 } 3836 mdelay(1); 3837 delay++; 3838 3839 /* Check port TX Command register that all 3840 * Tx queues are stopped 3841 */ 3842 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); 3843 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); 3844 } 3845 3846 /* Rx descriptors helper methods */ 3847 3848 /* Get number of Rx descriptors occupied by received packets */ 3849 static inline int 3850 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) 3851 { 3852 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); 3853 3854 return val & MVPP2_RXQ_OCCUPIED_MASK; 3855 } 3856 3857 /* Update Rx queue status with the number of occupied and available 3858 * Rx descriptor slots. 3859 */ 3860 static inline void 3861 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, 3862 int used_count, int free_count) 3863 { 3864 /* Decrement the number of used descriptors and increment count 3865 * increment the number of free descriptors. 
3866 */ 3867 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); 3868 3869 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); 3870 } 3871 3872 /* Get pointer to next RX descriptor to be processed by SW */ 3873 static inline struct mvpp2_rx_desc * 3874 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) 3875 { 3876 int rx_desc = rxq->next_desc_to_proc; 3877 3878 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); 3879 prefetch(rxq->descs + rxq->next_desc_to_proc); 3880 return rxq->descs + rx_desc; 3881 } 3882 3883 /* Set rx queue offset */ 3884 static void mvpp2_rxq_offset_set(struct mvpp2_port *port, 3885 int prxq, int offset) 3886 { 3887 u32 val; 3888 3889 /* Convert offset from bytes to units of 32 bytes */ 3890 offset = offset >> 5; 3891 3892 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 3893 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; 3894 3895 /* Offset is in */ 3896 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & 3897 MVPP2_RXQ_PACKET_OFFSET_MASK); 3898 3899 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 3900 } 3901 3902 /* Obtain BM cookie information from descriptor */ 3903 static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port, 3904 struct mvpp2_rx_desc *rx_desc) 3905 { 3906 int cpu = smp_processor_id(); 3907 int pool; 3908 3909 pool = (mvpp2_rxdesc_status_get(port, rx_desc) & 3910 MVPP2_RXD_BM_POOL_ID_MASK) >> 3911 MVPP2_RXD_BM_POOL_ID_OFFS; 3912 3913 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | 3914 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS); 3915 } 3916 3917 /* Tx descriptors helper methods */ 3918 3919 /* Get number of Tx descriptors waiting to be transmitted by HW */ 3920 static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port, 3921 struct mvpp2_tx_queue *txq) 3922 { 3923 u32 val; 3924 3925 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 3926 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG); 3927 3928 return val & MVPP2_TXQ_PENDING_MASK; 3929 } 3930 3931 /* Get 
pointer to next Tx descriptor to be processed (send) by HW */ 3932 static struct mvpp2_tx_desc * 3933 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) 3934 { 3935 int tx_desc = txq->next_desc_to_proc; 3936 3937 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); 3938 return txq->descs + tx_desc; 3939 } 3940 3941 /* Update HW with number of aggregated Tx descriptors to be sent */ 3942 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) 3943 { 3944 /* aggregated access - relevant TXQ number is written in TX desc */ 3945 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending); 3946 } 3947 3948 /* Get number of sent descriptors and decrement counter. 3949 * The number of sent descriptors is returned. 3950 * Per-CPU access 3951 */ 3952 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, 3953 struct mvpp2_tx_queue *txq) 3954 { 3955 u32 val; 3956 3957 /* Reading status reg resets transmitted descriptor counter */ 3958 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id)); 3959 3960 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> 3961 MVPP2_TRANSMITTED_COUNT_OFFSET; 3962 } 3963 3964 static void mvpp2_txq_sent_counter_clear(void *arg) 3965 { 3966 struct mvpp2_port *port = arg; 3967 int queue; 3968 3969 for (queue = 0; queue < txq_number; queue++) { 3970 int id = port->txqs[queue]->id; 3971 3972 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id)); 3973 } 3974 } 3975 3976 /* Set max sizes for Tx queues */ 3977 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) 3978 { 3979 u32 val, size, mtu; 3980 int txq, tx_port_num; 3981 3982 mtu = port->pkt_size * 8; 3983 if (mtu > MVPP2_TXP_MTU_MAX) 3984 mtu = MVPP2_TXP_MTU_MAX; 3985 3986 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ 3987 mtu = 3 * mtu; 3988 3989 /* Indirect access to registers */ 3990 tx_port_num = mvpp2_egress_port(port); 3991 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 3992 3993 /* Set MTU */ 3994 
val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); 3995 val &= ~MVPP2_TXP_MTU_MAX; 3996 val |= mtu; 3997 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); 3998 3999 /* TXP token size and all TXQs token size must be larger that MTU */ 4000 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); 4001 size = val & MVPP2_TXP_TOKEN_SIZE_MAX; 4002 if (size < mtu) { 4003 size = mtu; 4004 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; 4005 val |= size; 4006 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); 4007 } 4008 4009 for (txq = 0; txq < txq_number; txq++) { 4010 val = mvpp2_read(port->priv, 4011 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); 4012 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; 4013 4014 if (size < mtu) { 4015 size = mtu; 4016 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; 4017 val |= size; 4018 mvpp2_write(port->priv, 4019 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), 4020 val); 4021 } 4022 } 4023 } 4024 4025 /* Free Tx queue skbuffs */ 4026 static void mvpp2_txq_bufs_free(struct mvpp2_port *port, 4027 struct mvpp2_tx_queue *txq, 4028 struct mvpp2_txq_pcpu *txq_pcpu, int num) 4029 { 4030 int i; 4031 4032 for (i = 0; i < num; i++) 4033 mvpp2_txq_inc_get(txq_pcpu); 4034 } 4035 4036 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, 4037 u32 cause) 4038 { 4039 int queue = fls(cause) - 1; 4040 4041 return port->rxqs[queue]; 4042 } 4043 4044 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, 4045 u32 cause) 4046 { 4047 int queue = fls(cause) - 1; 4048 4049 return port->txqs[queue]; 4050 } 4051 4052 /* Rx/Tx queue initialization/cleanup methods */ 4053 4054 /* Allocate and initialize descriptors for aggr TXQ */ 4055 static int mvpp2_aggr_txq_init(struct udevice *dev, 4056 struct mvpp2_tx_queue *aggr_txq, 4057 int desc_num, int cpu, 4058 struct mvpp2 *priv) 4059 { 4060 u32 txq_dma; 4061 4062 /* Allocate memory for TX descriptors */ 4063 aggr_txq->descs = buffer_loc.aggr_tx_descs; 4064 aggr_txq->descs_dma = 
(dma_addr_t)buffer_loc.aggr_tx_descs; 4065 if (!aggr_txq->descs) 4066 return -ENOMEM; 4067 4068 /* Make sure descriptor address is cache line size aligned */ 4069 BUG_ON(aggr_txq->descs != 4070 PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); 4071 4072 aggr_txq->last_desc = aggr_txq->size - 1; 4073 4074 /* Aggr TXQ no reset WA */ 4075 aggr_txq->next_desc_to_proc = mvpp2_read(priv, 4076 MVPP2_AGGR_TXQ_INDEX_REG(cpu)); 4077 4078 /* Set Tx descriptors queue starting address indirect 4079 * access 4080 */ 4081 if (priv->hw_version == MVPP21) 4082 txq_dma = aggr_txq->descs_dma; 4083 else 4084 txq_dma = aggr_txq->descs_dma >> 4085 MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; 4086 4087 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); 4088 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num); 4089 4090 return 0; 4091 } 4092 4093 /* Create a specified Rx queue */ 4094 static int mvpp2_rxq_init(struct mvpp2_port *port, 4095 struct mvpp2_rx_queue *rxq) 4096 4097 { 4098 u32 rxq_dma; 4099 4100 rxq->size = port->rx_ring_size; 4101 4102 /* Allocate memory for RX descriptors */ 4103 rxq->descs = buffer_loc.rx_descs; 4104 rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs; 4105 if (!rxq->descs) 4106 return -ENOMEM; 4107 4108 BUG_ON(rxq->descs != 4109 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); 4110 4111 rxq->last_desc = rxq->size - 1; 4112 4113 /* Zero occupied and non-occupied counters - direct access */ 4114 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 4115 4116 /* Set Rx descriptors queue starting address - indirect access */ 4117 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); 4118 if (port->priv->hw_version == MVPP21) 4119 rxq_dma = rxq->descs_dma; 4120 else 4121 rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS; 4122 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); 4123 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); 4124 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0); 4125 4126 /* Set Offset */ 4127 
mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); 4128 4129 /* Add number of descriptors ready for receiving packets */ 4130 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); 4131 4132 return 0; 4133 } 4134 4135 /* Push packets received by the RXQ to BM pool */ 4136 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, 4137 struct mvpp2_rx_queue *rxq) 4138 { 4139 int rx_received, i; 4140 4141 rx_received = mvpp2_rxq_received(port, rxq->id); 4142 if (!rx_received) 4143 return; 4144 4145 for (i = 0; i < rx_received; i++) { 4146 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 4147 u32 bm = mvpp2_bm_cookie_build(port, rx_desc); 4148 4149 mvpp2_pool_refill(port, bm, 4150 mvpp2_rxdesc_dma_addr_get(port, rx_desc), 4151 mvpp2_rxdesc_cookie_get(port, rx_desc)); 4152 } 4153 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); 4154 } 4155 4156 /* Cleanup Rx queue */ 4157 static void mvpp2_rxq_deinit(struct mvpp2_port *port, 4158 struct mvpp2_rx_queue *rxq) 4159 { 4160 mvpp2_rxq_drop_pkts(port, rxq); 4161 4162 rxq->descs = NULL; 4163 rxq->last_desc = 0; 4164 rxq->next_desc_to_proc = 0; 4165 rxq->descs_dma = 0; 4166 4167 /* Clear Rx descriptors queue starting address and size; 4168 * free descriptor number 4169 */ 4170 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 4171 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); 4172 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0); 4173 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0); 4174 } 4175 4176 /* Create and initialize a Tx queue */ 4177 static int mvpp2_txq_init(struct mvpp2_port *port, 4178 struct mvpp2_tx_queue *txq) 4179 { 4180 u32 val; 4181 int cpu, desc, desc_per_txq, tx_port_num; 4182 struct mvpp2_txq_pcpu *txq_pcpu; 4183 4184 txq->size = port->tx_ring_size; 4185 4186 /* Allocate memory for Tx descriptors */ 4187 txq->descs = buffer_loc.tx_descs; 4188 txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs; 4189 if (!txq->descs) 4190 return -ENOMEM; 4191 4192 /* Make sure 
descriptor address is cache line size aligned */ 4193 BUG_ON(txq->descs != 4194 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); 4195 4196 txq->last_desc = txq->size - 1; 4197 4198 /* Set Tx descriptors queue starting address - indirect access */ 4199 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 4200 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma); 4201 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size & 4202 MVPP2_TXQ_DESC_SIZE_MASK); 4203 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0); 4204 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG, 4205 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); 4206 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG); 4207 val &= ~MVPP2_TXQ_PENDING_MASK; 4208 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val); 4209 4210 /* Calculate base address in prefetch buffer. We reserve 16 descriptors 4211 * for each existing TXQ. 4212 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT 4213 * GBE ports assumed to be continious from 0 to MVPP2_MAX_PORTS 4214 */ 4215 desc_per_txq = 16; 4216 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + 4217 (txq->log_id * desc_per_txq); 4218 4219 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, 4220 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | 4221 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); 4222 4223 /* WRR / EJP configuration - indirect access */ 4224 tx_port_num = mvpp2_egress_port(port); 4225 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 4226 4227 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); 4228 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; 4229 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); 4230 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; 4231 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); 4232 4233 val = MVPP2_TXQ_TOKEN_SIZE_MAX; 4234 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), 4235 val); 4236 4237 for_each_present_cpu(cpu) { 4238 txq_pcpu = per_cpu_ptr(txq->pcpu, 
cpu); 4239 txq_pcpu->size = txq->size; 4240 } 4241 4242 return 0; 4243 } 4244 4245 /* Free allocated TXQ resources */ 4246 static void mvpp2_txq_deinit(struct mvpp2_port *port, 4247 struct mvpp2_tx_queue *txq) 4248 { 4249 txq->descs = NULL; 4250 txq->last_desc = 0; 4251 txq->next_desc_to_proc = 0; 4252 txq->descs_dma = 0; 4253 4254 /* Set minimum bandwidth for disabled TXQs */ 4255 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); 4256 4257 /* Set Tx descriptors queue starting address and size */ 4258 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 4259 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0); 4260 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0); 4261 } 4262 4263 /* Cleanup Tx ports */ 4264 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) 4265 { 4266 struct mvpp2_txq_pcpu *txq_pcpu; 4267 int delay, pending, cpu; 4268 u32 val; 4269 4270 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 4271 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG); 4272 val |= MVPP2_TXQ_DRAIN_EN_MASK; 4273 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); 4274 4275 /* The napi queue has been stopped so wait for all packets 4276 * to be transmitted. 
4277 */ 4278 delay = 0; 4279 do { 4280 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { 4281 netdev_warn(port->dev, 4282 "port %d: cleaning queue %d timed out\n", 4283 port->id, txq->log_id); 4284 break; 4285 } 4286 mdelay(1); 4287 delay++; 4288 4289 pending = mvpp2_txq_pend_desc_num_get(port, txq); 4290 } while (pending); 4291 4292 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 4293 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); 4294 4295 for_each_present_cpu(cpu) { 4296 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 4297 4298 /* Release all packets */ 4299 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); 4300 4301 /* Reset queue */ 4302 txq_pcpu->count = 0; 4303 txq_pcpu->txq_put_index = 0; 4304 txq_pcpu->txq_get_index = 0; 4305 } 4306 } 4307 4308 /* Cleanup all Tx queues */ 4309 static void mvpp2_cleanup_txqs(struct mvpp2_port *port) 4310 { 4311 struct mvpp2_tx_queue *txq; 4312 int queue; 4313 u32 val; 4314 4315 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); 4316 4317 /* Reset Tx ports and delete Tx queues */ 4318 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); 4319 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 4320 4321 for (queue = 0; queue < txq_number; queue++) { 4322 txq = port->txqs[queue]; 4323 mvpp2_txq_clean(port, txq); 4324 mvpp2_txq_deinit(port, txq); 4325 } 4326 4327 mvpp2_txq_sent_counter_clear(port); 4328 4329 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); 4330 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 4331 } 4332 4333 /* Cleanup all Rx queues */ 4334 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) 4335 { 4336 int queue; 4337 4338 for (queue = 0; queue < rxq_number; queue++) 4339 mvpp2_rxq_deinit(port, port->rxqs[queue]); 4340 } 4341 4342 /* Init all Rx queues for port */ 4343 static int mvpp2_setup_rxqs(struct mvpp2_port *port) 4344 { 4345 int queue, err; 4346 4347 for (queue = 0; queue < rxq_number; queue++) { 4348 err = mvpp2_rxq_init(port, port->rxqs[queue]); 4349 if (err) 4350 goto err_cleanup; 4351 } 4352 return 0; 4353 
4354 err_cleanup: 4355 mvpp2_cleanup_rxqs(port); 4356 return err; 4357 } 4358 4359 /* Init all tx queues for port */ 4360 static int mvpp2_setup_txqs(struct mvpp2_port *port) 4361 { 4362 struct mvpp2_tx_queue *txq; 4363 int queue, err; 4364 4365 for (queue = 0; queue < txq_number; queue++) { 4366 txq = port->txqs[queue]; 4367 err = mvpp2_txq_init(port, txq); 4368 if (err) 4369 goto err_cleanup; 4370 } 4371 4372 mvpp2_txq_sent_counter_clear(port); 4373 return 0; 4374 4375 err_cleanup: 4376 mvpp2_cleanup_txqs(port); 4377 return err; 4378 } 4379 4380 /* Adjust link */ 4381 static void mvpp2_link_event(struct mvpp2_port *port) 4382 { 4383 struct phy_device *phydev = port->phy_dev; 4384 int status_change = 0; 4385 u32 val; 4386 4387 if (phydev->link) { 4388 if ((port->speed != phydev->speed) || 4389 (port->duplex != phydev->duplex)) { 4390 u32 val; 4391 4392 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4393 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | 4394 MVPP2_GMAC_CONFIG_GMII_SPEED | 4395 MVPP2_GMAC_CONFIG_FULL_DUPLEX | 4396 MVPP2_GMAC_AN_SPEED_EN | 4397 MVPP2_GMAC_AN_DUPLEX_EN); 4398 4399 if (phydev->duplex) 4400 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; 4401 4402 if (phydev->speed == SPEED_1000) 4403 val |= MVPP2_GMAC_CONFIG_GMII_SPEED; 4404 else if (phydev->speed == SPEED_100) 4405 val |= MVPP2_GMAC_CONFIG_MII_SPEED; 4406 4407 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4408 4409 port->duplex = phydev->duplex; 4410 port->speed = phydev->speed; 4411 } 4412 } 4413 4414 if (phydev->link != port->link) { 4415 if (!phydev->link) { 4416 port->duplex = -1; 4417 port->speed = 0; 4418 } 4419 4420 port->link = phydev->link; 4421 status_change = 1; 4422 } 4423 4424 if (status_change) { 4425 if (phydev->link) { 4426 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4427 val |= (MVPP2_GMAC_FORCE_LINK_PASS | 4428 MVPP2_GMAC_FORCE_LINK_DOWN); 4429 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4430 mvpp2_egress_enable(port); 4431 mvpp2_ingress_enable(port); 4432 
} else { 4433 mvpp2_ingress_disable(port); 4434 mvpp2_egress_disable(port); 4435 } 4436 } 4437 } 4438 4439 /* Main RX/TX processing routines */ 4440 4441 /* Display more error info */ 4442 static void mvpp2_rx_error(struct mvpp2_port *port, 4443 struct mvpp2_rx_desc *rx_desc) 4444 { 4445 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 4446 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); 4447 4448 switch (status & MVPP2_RXD_ERR_CODE_MASK) { 4449 case MVPP2_RXD_ERR_CRC: 4450 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n", 4451 status, sz); 4452 break; 4453 case MVPP2_RXD_ERR_OVERRUN: 4454 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n", 4455 status, sz); 4456 break; 4457 case MVPP2_RXD_ERR_RESOURCE: 4458 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n", 4459 status, sz); 4460 break; 4461 } 4462 } 4463 4464 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ 4465 static int mvpp2_rx_refill(struct mvpp2_port *port, 4466 struct mvpp2_bm_pool *bm_pool, 4467 u32 bm, dma_addr_t dma_addr) 4468 { 4469 mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr); 4470 return 0; 4471 } 4472 4473 /* Set hw internals when starting port */ 4474 static void mvpp2_start_dev(struct mvpp2_port *port) 4475 { 4476 switch (port->phy_interface) { 4477 case PHY_INTERFACE_MODE_RGMII: 4478 case PHY_INTERFACE_MODE_RGMII_ID: 4479 case PHY_INTERFACE_MODE_SGMII: 4480 mvpp2_gmac_max_rx_size_set(port); 4481 default: 4482 break; 4483 } 4484 4485 mvpp2_txp_max_tx_size_set(port); 4486 4487 if (port->priv->hw_version == MVPP21) 4488 mvpp2_port_enable(port); 4489 else 4490 gop_port_enable(port, 1); 4491 } 4492 4493 /* Set hw internals when stopping port */ 4494 static void mvpp2_stop_dev(struct mvpp2_port *port) 4495 { 4496 /* Stop new packets from arriving to RXQs */ 4497 mvpp2_ingress_disable(port); 4498 4499 mvpp2_egress_disable(port); 4500 4501 if (port->priv->hw_version == MVPP21) 4502 
mvpp2_port_disable(port); 4503 else 4504 gop_port_enable(port, 0); 4505 } 4506 4507 static int mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port) 4508 { 4509 struct phy_device *phy_dev; 4510 4511 if (!port->init || port->link == 0) { 4512 phy_dev = phy_connect(port->priv->bus, port->phyaddr, dev, 4513 port->phy_interface); 4514 port->phy_dev = phy_dev; 4515 if (!phy_dev) { 4516 netdev_err(port->dev, "cannot connect to phy\n"); 4517 return -ENODEV; 4518 } 4519 phy_dev->supported &= PHY_GBIT_FEATURES; 4520 phy_dev->advertising = phy_dev->supported; 4521 4522 port->phy_dev = phy_dev; 4523 port->link = 0; 4524 port->duplex = 0; 4525 port->speed = 0; 4526 4527 phy_config(phy_dev); 4528 phy_startup(phy_dev); 4529 if (!phy_dev->link) { 4530 printf("%s: No link\n", phy_dev->dev->name); 4531 return -1; 4532 } 4533 4534 port->init = 1; 4535 } else { 4536 mvpp2_egress_enable(port); 4537 mvpp2_ingress_enable(port); 4538 } 4539 4540 return 0; 4541 } 4542 4543 static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port) 4544 { 4545 unsigned char mac_bcast[ETH_ALEN] = { 4546 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 4547 int err; 4548 4549 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true); 4550 if (err) { 4551 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); 4552 return err; 4553 } 4554 err = mvpp2_prs_mac_da_accept(port->priv, port->id, 4555 port->dev_addr, true); 4556 if (err) { 4557 netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n"); 4558 return err; 4559 } 4560 err = mvpp2_prs_def_flow(port); 4561 if (err) { 4562 netdev_err(dev, "mvpp2_prs_def_flow failed\n"); 4563 return err; 4564 } 4565 4566 /* Allocate the Rx/Tx queues */ 4567 err = mvpp2_setup_rxqs(port); 4568 if (err) { 4569 netdev_err(port->dev, "cannot allocate Rx queues\n"); 4570 return err; 4571 } 4572 4573 err = mvpp2_setup_txqs(port); 4574 if (err) { 4575 netdev_err(port->dev, "cannot allocate Tx queues\n"); 4576 return err; 4577 } 4578 4579 if (port->phy_node) { 4580 
err = mvpp2_phy_connect(dev, port); 4581 if (err < 0) 4582 return err; 4583 4584 mvpp2_link_event(port); 4585 } else { 4586 mvpp2_egress_enable(port); 4587 mvpp2_ingress_enable(port); 4588 } 4589 4590 mvpp2_start_dev(port); 4591 4592 return 0; 4593 } 4594 4595 /* No Device ops here in U-Boot */ 4596 4597 /* Driver initialization */ 4598 4599 static void mvpp2_port_power_up(struct mvpp2_port *port) 4600 { 4601 struct mvpp2 *priv = port->priv; 4602 4603 /* On PPv2.2 the GoP / interface configuration has already been done */ 4604 if (priv->hw_version == MVPP21) 4605 mvpp2_port_mii_set(port); 4606 mvpp2_port_periodic_xon_disable(port); 4607 if (priv->hw_version == MVPP21) 4608 mvpp2_port_fc_adv_enable(port); 4609 mvpp2_port_reset(port); 4610 } 4611 4612 /* Initialize port HW */ 4613 static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port) 4614 { 4615 struct mvpp2 *priv = port->priv; 4616 struct mvpp2_txq_pcpu *txq_pcpu; 4617 int queue, cpu, err; 4618 4619 if (port->first_rxq + rxq_number > 4620 MVPP2_MAX_PORTS * priv->max_port_rxqs) 4621 return -EINVAL; 4622 4623 /* Disable port */ 4624 mvpp2_egress_disable(port); 4625 if (priv->hw_version == MVPP21) 4626 mvpp2_port_disable(port); 4627 else 4628 gop_port_enable(port, 0); 4629 4630 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs), 4631 GFP_KERNEL); 4632 if (!port->txqs) 4633 return -ENOMEM; 4634 4635 /* Associate physical Tx queues to this port and initialize. 4636 * The mapping is predefined. 
4637 */ 4638 for (queue = 0; queue < txq_number; queue++) { 4639 int queue_phy_id = mvpp2_txq_phys(port->id, queue); 4640 struct mvpp2_tx_queue *txq; 4641 4642 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); 4643 if (!txq) 4644 return -ENOMEM; 4645 4646 txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu), 4647 GFP_KERNEL); 4648 if (!txq->pcpu) 4649 return -ENOMEM; 4650 4651 txq->id = queue_phy_id; 4652 txq->log_id = queue; 4653 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; 4654 for_each_present_cpu(cpu) { 4655 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 4656 txq_pcpu->cpu = cpu; 4657 } 4658 4659 port->txqs[queue] = txq; 4660 } 4661 4662 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs), 4663 GFP_KERNEL); 4664 if (!port->rxqs) 4665 return -ENOMEM; 4666 4667 /* Allocate and initialize Rx queue for this port */ 4668 for (queue = 0; queue < rxq_number; queue++) { 4669 struct mvpp2_rx_queue *rxq; 4670 4671 /* Map physical Rx queue to port's logical Rx queue */ 4672 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); 4673 if (!rxq) 4674 return -ENOMEM; 4675 /* Map this Rx queue to a physical queue */ 4676 rxq->id = port->first_rxq + queue; 4677 rxq->port = port->id; 4678 rxq->logic_rxq = queue; 4679 4680 port->rxqs[queue] = rxq; 4681 } 4682 4683 /* Configure Rx queue group interrupt for this port */ 4684 if (priv->hw_version == MVPP21) { 4685 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), 4686 CONFIG_MV_ETH_RXQ); 4687 } else { 4688 u32 val; 4689 4690 val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET); 4691 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); 4692 4693 val = (CONFIG_MV_ETH_RXQ << 4694 MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET); 4695 mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); 4696 } 4697 4698 /* Create Rx descriptor rings */ 4699 for (queue = 0; queue < rxq_number; queue++) { 4700 struct mvpp2_rx_queue *rxq = port->rxqs[queue]; 4701 4702 rxq->size = port->rx_ring_size; 4703 rxq->pkts_coal = 
MVPP2_RX_COAL_PKTS; 4704 rxq->time_coal = MVPP2_RX_COAL_USEC; 4705 } 4706 4707 mvpp2_ingress_disable(port); 4708 4709 /* Port default configuration */ 4710 mvpp2_defaults_set(port); 4711 4712 /* Port's classifier configuration */ 4713 mvpp2_cls_oversize_rxq_set(port); 4714 mvpp2_cls_port_config(port); 4715 4716 /* Provide an initial Rx packet size */ 4717 port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN); 4718 4719 /* Initialize pools for swf */ 4720 err = mvpp2_swf_bm_pool_init(port); 4721 if (err) 4722 return err; 4723 4724 return 0; 4725 } 4726 4727 static int phy_info_parse(struct udevice *dev, struct mvpp2_port *port) 4728 { 4729 int port_node = dev_of_offset(dev); 4730 const char *phy_mode_str; 4731 int phy_node, mdio_off, cp_node; 4732 u32 id; 4733 u32 phyaddr = 0; 4734 int phy_mode = -1; 4735 u64 mdio_addr; 4736 4737 phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy"); 4738 4739 if (phy_node > 0) { 4740 phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0); 4741 if (phyaddr < 0) { 4742 dev_err(&pdev->dev, "could not find phy address\n"); 4743 return -1; 4744 } 4745 mdio_off = fdt_parent_offset(gd->fdt_blob, phy_node); 4746 4747 /* TODO: This WA for mdio issue. U-boot 2017 don't have 4748 * mdio driver and on MACHIATOBin board ports from CP1 4749 * connected to mdio on CP0. 4750 * WA is to get mdio address from phy handler parent 4751 * base address. WA should be removed after 4752 * mdio driver implementation. 
4753 */ 4754 mdio_addr = fdtdec_get_uint(gd->fdt_blob, 4755 mdio_off, "reg", 0); 4756 4757 cp_node = fdt_parent_offset(gd->fdt_blob, mdio_off); 4758 mdio_addr |= fdt_get_base_address((void *)gd->fdt_blob, 4759 cp_node); 4760 4761 port->priv->mdio_base = (void *)mdio_addr; 4762 4763 if (port->priv->mdio_base < 0) { 4764 dev_err(&pdev->dev, "could not find mdio base address\n"); 4765 return -1; 4766 } 4767 } else { 4768 phy_node = 0; 4769 } 4770 4771 phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL); 4772 if (phy_mode_str) 4773 phy_mode = phy_get_interface_by_name(phy_mode_str); 4774 if (phy_mode == -1) { 4775 dev_err(&pdev->dev, "incorrect phy mode\n"); 4776 return -EINVAL; 4777 } 4778 4779 id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1); 4780 if (id == -1) { 4781 dev_err(&pdev->dev, "missing port-id value\n"); 4782 return -EINVAL; 4783 } 4784 4785 #ifdef CONFIG_DM_GPIO 4786 gpio_request_by_name(dev, "phy-reset-gpios", 0, 4787 &port->phy_reset_gpio, GPIOD_IS_OUT); 4788 gpio_request_by_name(dev, "marvell,sfp-tx-disable-gpio", 0, 4789 &port->phy_tx_disable_gpio, GPIOD_IS_OUT); 4790 #endif 4791 4792 /* 4793 * ToDo: 4794 * Not sure if this DT property "phy-speed" will get accepted, so 4795 * this might change later 4796 */ 4797 /* Get phy-speed for SGMII 2.5Gbps vs 1Gbps setup */ 4798 port->phy_speed = fdtdec_get_int(gd->fdt_blob, port_node, 4799 "phy-speed", 1000); 4800 4801 port->id = id; 4802 if (port->priv->hw_version == MVPP21) 4803 port->first_rxq = port->id * rxq_number; 4804 else 4805 port->first_rxq = port->id * port->priv->max_port_rxqs; 4806 port->phy_node = phy_node; 4807 port->phy_interface = phy_mode; 4808 port->phyaddr = phyaddr; 4809 4810 return 0; 4811 } 4812 4813 #ifdef CONFIG_DM_GPIO 4814 /* Port GPIO initialization */ 4815 static void mvpp2_gpio_init(struct mvpp2_port *port) 4816 { 4817 if (dm_gpio_is_valid(&port->phy_reset_gpio)) { 4818 dm_gpio_set_value(&port->phy_reset_gpio, 0); 4819 udelay(1000); 4820 
dm_gpio_set_value(&port->phy_reset_gpio, 1); 4821 } 4822 4823 if (dm_gpio_is_valid(&port->phy_tx_disable_gpio)) 4824 dm_gpio_set_value(&port->phy_tx_disable_gpio, 0); 4825 } 4826 #endif 4827 4828 /* Ports initialization */ 4829 static int mvpp2_port_probe(struct udevice *dev, 4830 struct mvpp2_port *port, 4831 int port_node, 4832 struct mvpp2 *priv) 4833 { 4834 int err; 4835 4836 port->tx_ring_size = MVPP2_MAX_TXD; 4837 port->rx_ring_size = MVPP2_MAX_RXD; 4838 4839 err = mvpp2_port_init(dev, port); 4840 if (err < 0) { 4841 dev_err(&pdev->dev, "failed to init port %d\n", port->id); 4842 return err; 4843 } 4844 mvpp2_port_power_up(port); 4845 4846 #ifdef CONFIG_DM_GPIO 4847 mvpp2_gpio_init(port); 4848 #endif 4849 4850 priv->port_list[port->id] = port; 4851 return 0; 4852 } 4853 4854 /* Initialize decoding windows */ 4855 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, 4856 struct mvpp2 *priv) 4857 { 4858 u32 win_enable; 4859 int i; 4860 4861 for (i = 0; i < 6; i++) { 4862 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); 4863 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); 4864 4865 if (i < 4) 4866 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); 4867 } 4868 4869 win_enable = 0; 4870 4871 for (i = 0; i < dram->num_cs; i++) { 4872 const struct mbus_dram_window *cs = dram->cs + i; 4873 4874 mvpp2_write(priv, MVPP2_WIN_BASE(i), 4875 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | 4876 dram->mbus_dram_target_id); 4877 4878 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 4879 (cs->size - 1) & 0xffff0000); 4880 4881 win_enable |= (1 << i); 4882 } 4883 4884 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); 4885 } 4886 4887 /* Initialize Rx FIFO's */ 4888 static void mvpp2_rx_fifo_init(struct mvpp2 *priv) 4889 { 4890 int port; 4891 4892 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 4893 if (priv->hw_version == MVPP22) { 4894 if (port == 0) { 4895 mvpp2_write(priv, 4896 MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4897 MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE); 4898 mvpp2_write(priv, 
4899 MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4900 MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE); 4901 } else if (port == 1) { 4902 mvpp2_write(priv, 4903 MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4904 MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE); 4905 mvpp2_write(priv, 4906 MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4907 MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE); 4908 } else { 4909 mvpp2_write(priv, 4910 MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4911 MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE); 4912 mvpp2_write(priv, 4913 MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4914 MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE); 4915 } 4916 } else { 4917 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4918 MVPP21_RX_FIFO_PORT_DATA_SIZE); 4919 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4920 MVPP21_RX_FIFO_PORT_ATTR_SIZE); 4921 } 4922 } 4923 4924 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, 4925 MVPP2_RX_FIFO_PORT_MIN_PKT); 4926 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); 4927 } 4928 4929 /* Initialize Tx FIFO's */ 4930 static void mvpp2_tx_fifo_init(struct mvpp2 *priv) 4931 { 4932 int port, val; 4933 4934 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 4935 /* Port 0 supports 10KB TX FIFO */ 4936 if (port == 0) { 4937 val = MVPP2_TX_FIFO_DATA_SIZE_10KB & 4938 MVPP22_TX_FIFO_SIZE_MASK; 4939 } else { 4940 val = MVPP2_TX_FIFO_DATA_SIZE_3KB & 4941 MVPP22_TX_FIFO_SIZE_MASK; 4942 } 4943 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), val); 4944 } 4945 } 4946 4947 static void mvpp2_axi_init(struct mvpp2 *priv) 4948 { 4949 u32 val, rdval, wrval; 4950 4951 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); 4952 4953 /* AXI Bridge Configuration */ 4954 4955 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE 4956 << MVPP22_AXI_ATTR_CACHE_OFFS; 4957 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4958 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 4959 4960 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE 4961 << MVPP22_AXI_ATTR_CACHE_OFFS; 4962 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4963 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 4964 4965 /* BM */ 4966 mvpp2_write(priv, 
MVPP22_AXI_BM_WR_ATTR_REG, wrval); 4967 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); 4968 4969 /* Descriptors */ 4970 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); 4971 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); 4972 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); 4973 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); 4974 4975 /* Buffer Data */ 4976 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); 4977 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); 4978 4979 val = MVPP22_AXI_CODE_CACHE_NON_CACHE 4980 << MVPP22_AXI_CODE_CACHE_OFFS; 4981 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM 4982 << MVPP22_AXI_CODE_DOMAIN_OFFS; 4983 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); 4984 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val); 4985 4986 val = MVPP22_AXI_CODE_CACHE_RD_CACHE 4987 << MVPP22_AXI_CODE_CACHE_OFFS; 4988 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4989 << MVPP22_AXI_CODE_DOMAIN_OFFS; 4990 4991 mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val); 4992 4993 val = MVPP22_AXI_CODE_CACHE_WR_CACHE 4994 << MVPP22_AXI_CODE_CACHE_OFFS; 4995 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4996 << MVPP22_AXI_CODE_DOMAIN_OFFS; 4997 4998 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); 4999 } 5000 5001 /* Initialize network controller common part HW */ 5002 static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv) 5003 { 5004 const struct mbus_dram_target_info *dram_target_info; 5005 int err, i; 5006 u32 val; 5007 5008 /* Checks for hardware constraints (U-Boot uses only one rxq) */ 5009 if ((rxq_number > priv->max_port_rxqs) || 5010 (txq_number > MVPP2_MAX_TXQ)) { 5011 dev_err(&pdev->dev, "invalid queue size parameter\n"); 5012 return -EINVAL; 5013 } 5014 5015 /* MBUS windows configuration */ 5016 dram_target_info = mvebu_mbus_dram_info(); 5017 if (dram_target_info) 5018 mvpp2_conf_mbus_windows(dram_target_info, priv); 5019 5020 if (priv->hw_version == MVPP22) 5021 mvpp2_axi_init(priv); 
5022 5023 if (priv->hw_version == MVPP21) { 5024 /* Disable HW PHY polling */ 5025 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); 5026 val |= MVPP2_PHY_AN_STOP_SMI0_MASK; 5027 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); 5028 } else { 5029 /* Enable HW PHY polling */ 5030 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG); 5031 val |= MVPP22_SMI_POLLING_EN; 5032 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG); 5033 } 5034 5035 /* Allocate and initialize aggregated TXQs */ 5036 priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(), 5037 sizeof(struct mvpp2_tx_queue), 5038 GFP_KERNEL); 5039 if (!priv->aggr_txqs) 5040 return -ENOMEM; 5041 5042 for_each_present_cpu(i) { 5043 priv->aggr_txqs[i].id = i; 5044 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; 5045 err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i], 5046 MVPP2_AGGR_TXQ_SIZE, i, priv); 5047 if (err < 0) 5048 return err; 5049 } 5050 5051 /* Rx Fifo Init */ 5052 mvpp2_rx_fifo_init(priv); 5053 5054 /* Tx Fifo Init */ 5055 if (priv->hw_version == MVPP22) 5056 mvpp2_tx_fifo_init(priv); 5057 5058 /* Reset Rx queue group interrupt configuration */ 5059 for (i = 0; i < MVPP2_MAX_PORTS; i++) { 5060 if (priv->hw_version == MVPP21) { 5061 mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i), 5062 CONFIG_MV_ETH_RXQ); 5063 continue; 5064 } else { 5065 u32 val; 5066 5067 val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET); 5068 mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); 5069 5070 val = (CONFIG_MV_ETH_RXQ << 5071 MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET); 5072 mvpp2_write(priv, 5073 MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); 5074 } 5075 } 5076 5077 if (priv->hw_version == MVPP21) 5078 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, 5079 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); 5080 5081 /* Allow cache snoop when transmiting packets */ 5082 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); 5083 5084 /* Buffer Manager initialization */ 5085 err = mvpp2_bm_init(dev, priv); 5086 if (err < 0) 5087 
return err; 5088 5089 /* Parser default initialization */ 5090 err = mvpp2_prs_default_init(dev, priv); 5091 if (err < 0) 5092 return err; 5093 5094 /* Classifier default initialization */ 5095 mvpp2_cls_init(priv); 5096 5097 return 0; 5098 } 5099 5100 /* SMI / MDIO functions */ 5101 5102 static int smi_wait_ready(struct mvpp2 *priv) 5103 { 5104 u32 timeout = MVPP2_SMI_TIMEOUT; 5105 u32 smi_reg; 5106 5107 /* wait till the SMI is not busy */ 5108 do { 5109 /* read smi register */ 5110 smi_reg = readl(priv->mdio_base); 5111 if (timeout-- == 0) { 5112 printf("Error: SMI busy timeout\n"); 5113 return -EFAULT; 5114 } 5115 } while (smi_reg & MVPP2_SMI_BUSY); 5116 5117 return 0; 5118 } 5119 5120 /* 5121 * mpp2_mdio_read - miiphy_read callback function. 5122 * 5123 * Returns 16bit phy register value, or 0xffff on error 5124 */ 5125 static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg) 5126 { 5127 struct mvpp2 *priv = bus->priv; 5128 u32 smi_reg; 5129 u32 timeout; 5130 5131 /* check parameters */ 5132 if (addr > MVPP2_PHY_ADDR_MASK) { 5133 printf("Error: Invalid PHY address %d\n", addr); 5134 return -EFAULT; 5135 } 5136 5137 if (reg > MVPP2_PHY_REG_MASK) { 5138 printf("Err: Invalid register offset %d\n", reg); 5139 return -EFAULT; 5140 } 5141 5142 /* wait till the SMI is not busy */ 5143 if (smi_wait_ready(priv) < 0) 5144 return -EFAULT; 5145 5146 /* fill the phy address and regiser offset and read opcode */ 5147 smi_reg = (addr << MVPP2_SMI_DEV_ADDR_OFFS) 5148 | (reg << MVPP2_SMI_REG_ADDR_OFFS) 5149 | MVPP2_SMI_OPCODE_READ; 5150 5151 /* write the smi register */ 5152 writel(smi_reg, priv->mdio_base); 5153 5154 /* wait till read value is ready */ 5155 timeout = MVPP2_SMI_TIMEOUT; 5156 5157 do { 5158 /* read smi register */ 5159 smi_reg = readl(priv->mdio_base); 5160 if (timeout-- == 0) { 5161 printf("Err: SMI read ready timeout\n"); 5162 return -EFAULT; 5163 } 5164 } while (!(smi_reg & MVPP2_SMI_READ_VALID)); 5165 5166 /* Wait for the data to update 
in the SMI register */ 5167 for (timeout = 0; timeout < MVPP2_SMI_TIMEOUT; timeout++) 5168 ; 5169 5170 return readl(priv->mdio_base) & MVPP2_SMI_DATA_MASK; 5171 } 5172 5173 /* 5174 * mpp2_mdio_write - miiphy_write callback function. 5175 * 5176 * Returns 0 if write succeed, -EINVAL on bad parameters 5177 * -ETIME on timeout 5178 */ 5179 static int mpp2_mdio_write(struct mii_dev *bus, int addr, int devad, int reg, 5180 u16 value) 5181 { 5182 struct mvpp2 *priv = bus->priv; 5183 u32 smi_reg; 5184 5185 /* check parameters */ 5186 if (addr > MVPP2_PHY_ADDR_MASK) { 5187 printf("Error: Invalid PHY address %d\n", addr); 5188 return -EFAULT; 5189 } 5190 5191 if (reg > MVPP2_PHY_REG_MASK) { 5192 printf("Err: Invalid register offset %d\n", reg); 5193 return -EFAULT; 5194 } 5195 5196 /* wait till the SMI is not busy */ 5197 if (smi_wait_ready(priv) < 0) 5198 return -EFAULT; 5199 5200 /* fill the phy addr and reg offset and write opcode and data */ 5201 smi_reg = value << MVPP2_SMI_DATA_OFFS; 5202 smi_reg |= (addr << MVPP2_SMI_DEV_ADDR_OFFS) 5203 | (reg << MVPP2_SMI_REG_ADDR_OFFS); 5204 smi_reg &= ~MVPP2_SMI_OPCODE_READ; 5205 5206 /* write the smi register */ 5207 writel(smi_reg, priv->mdio_base); 5208 5209 return 0; 5210 } 5211 5212 static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp) 5213 { 5214 struct mvpp2_port *port = dev_get_priv(dev); 5215 struct mvpp2_rx_desc *rx_desc; 5216 struct mvpp2_bm_pool *bm_pool; 5217 dma_addr_t dma_addr; 5218 u32 bm, rx_status; 5219 int pool, rx_bytes, err; 5220 int rx_received; 5221 struct mvpp2_rx_queue *rxq; 5222 u32 cause_rx_tx, cause_rx, cause_misc; 5223 u8 *data; 5224 5225 cause_rx_tx = mvpp2_read(port->priv, 5226 MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); 5227 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; 5228 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; 5229 if (!cause_rx_tx && !cause_misc) 5230 return 0; 5231 5232 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; 5233 5234 /* Process RX packets 
	 */
	cause_rx |= port->pending_cause_rx;
	rxq = mvpp2_get_rx_queue(port, cause_rx);

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);

	/* Return if no packets are received */
	if (!rx_received)
		return 0;

	/* Only one descriptor is consumed per call (U-Boot polls) */
	rx_desc = mvpp2_rxq_next_desc_get(rxq);
	rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
	rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
	rx_bytes -= MVPP2_MH_SIZE;
	dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);

	bm = mvpp2_bm_cookie_build(port, rx_desc);
	pool = mvpp2_bm_cookie_pool_get(bm);
	bm_pool = &port->priv->bm_pools[pool];

	/* In case of an error, release the requested buffer pointer
	 * to the Buffer Manager. This request process is controlled
	 * by the hardware, and the information about the buffer is
	 * comprised by the RX descriptor.
	 */
	if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
		mvpp2_rx_error(port, rx_desc);
		/* Return the buffer to the pool */
		mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
		return 0;
	}

	err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
	if (err) {
		netdev_err(port->dev, "failed to refill BM pools\n");
		return 0;
	}

	/* Update Rx queue management counters */
	mb();
	mvpp2_rxq_status_update(port, rxq->id, 1, 1);

	/* give packet to stack - skip on first n bytes */
	/* NOTE(review): 2 + 32 looks like 2-byte Marvell header plus
	 * 32-byte headroom — confirm it matches the refill offset
	 */
	data = (u8 *)dma_addr + 2 + 32;

	if (rx_bytes <= 0)
		return 0;

	/*
	 * No cache invalidation needed here, since the rx_buffer's are
	 * located in a uncached memory region
	 */
	*packetp = data;

	return rx_bytes;
}

/* Drain Txq: toggle the drain-enable bit of the prefetch buffer
 * register for the given TX queue
 */
static void mvpp2_txq_drain(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			    int enable)
{
	u32 val;

	/* Select the queue, then read-modify-write its prefetch register */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv,
			 MVPP2_TXQ_PREF_BUF_REG);
	if (enable)
		val |= MVPP2_TXQ_DRAIN_EN_MASK;
	else
		val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
}

/*
 * U-Boot eth_ops .send callback: transmit one frame synchronously.
 *
 * Builds a single first+last TX descriptor on the per-CPU aggregated
 * queue, then busy-waits until the hardware has moved it to the
 * physical queue and reported it sent.  Always returns 0; timeouts
 * only print a message (legacy behavior).
 */
static int mvpp2_send(struct udevice *dev, void *packet, int length)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_tx_desc *tx_desc;
	int tx_done;
	int timeout;

	txq = port->txqs[0];
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, length);
	/* Buffer address is split into an aligned base and an offset */
	mvpp2_txdesc_offset_set(port, tx_desc,
				(dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);
	/* First and Last descriptor */
	mvpp2_txdesc_cmd_set(port, tx_desc,
			     MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
			     | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);

	/* Flush tx data */
	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + ALIGN(length, PKTALIGN));

	/* Enable transmit */
	mb();
	mvpp2_aggr_txq_pend_desc_add(port, 1);

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);

	/* Wait until the descriptor leaves the aggregated queue */
	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent from aggregated to phys TXQ\n");
			return 0;
		}
		tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (tx_done);

	/* Enable TXQ drain */
	mvpp2_txq_drain(port, txq, 1);

	/* Wait until the hardware reports the frame as sent */
	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent\n");
			return 0;
		}
		tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	} while (!tx_done);

	/* Disable TXQ drain */
	mvpp2_txq_drain(port, txq, 0);

	return 0;
}

/*
 * U-Boot eth_ops .start callback: program the MAC address into the
 * parser, power up the port for the supported PHY modes and open it.
 */
static int mvpp2_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvpp2_port *port = dev_get_priv(dev);

	/* Load current MAC address */
	memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);

	/* Reconfigure parser accept the original MAC address */
	mvpp2_prs_update_mac_da(port, port->dev_addr);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		mvpp2_port_power_up(port);
		/* fallthrough */
	default:
		break;
	}

	/* NOTE(review): mvpp2_open() result is ignored here — confirm
	 * callers do not rely on start failing on open errors
	 */
	mvpp2_open(dev, port);

	return 0;
}

/* U-Boot eth_ops .stop callback: halt the port and free its queues */
static void mvpp2_stop(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);

	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);
}

/* Program the PHY address of this port into the per-GOP SMI register */
static int mvpp22_smi_phy_addr_cfg(struct mvpp2_port *port)
{
	writel(port->phyaddr, port->priv->iface_base +
	       MVPP22_SMI_PHY_ADDR_REG(port->gop_id));

	return 0;
}

/*
 * One-time probe of the base (parent) controller: allocate the shared
 * DMA buffer area, resolve register bases and register the MDIO bus.
 */
static int mvpp2_base_probe(struct udevice *dev)
{
	struct mvpp2 *priv = dev_get_priv(dev);
	struct mii_dev *bus;
	void *bd_space;
	u32 size = 0;
	int i;

	/* Save hw-version */
	priv->hw_version = dev_get_driver_data(dev);

	/*
	 * U-Boot special buffer handling:
	 *
	 * Allocate buffer area for descs and rx_buffers. This is only
	 * done once for all interfaces. As only one interface can
	 * be active.
Make this area DMA-safe by disabling the D-cache 5427 */ 5428 5429 /* Align buffer area for descs and rx_buffers to 1MiB */ 5430 bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE); 5431 mmu_set_region_dcache_behaviour((unsigned long)bd_space, 5432 BD_SPACE, DCACHE_OFF); 5433 5434 buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space; 5435 size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE; 5436 5437 buffer_loc.tx_descs = 5438 (struct mvpp2_tx_desc *)((unsigned long)bd_space + size); 5439 size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE; 5440 5441 buffer_loc.rx_descs = 5442 (struct mvpp2_rx_desc *)((unsigned long)bd_space + size); 5443 size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE; 5444 5445 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { 5446 buffer_loc.bm_pool[i] = 5447 (unsigned long *)((unsigned long)bd_space + size); 5448 if (priv->hw_version == MVPP21) 5449 size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32); 5450 else 5451 size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64); 5452 } 5453 5454 for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) { 5455 buffer_loc.rx_buffer[i] = 5456 (unsigned long *)((unsigned long)bd_space + size); 5457 size += RX_BUFFER_SIZE; 5458 } 5459 5460 /* Clear the complete area so that all descriptors are cleared */ 5461 memset(bd_space, 0, size); 5462 5463 /* Save base addresses for later use */ 5464 priv->base = (void *)devfdt_get_addr_index(dev, 0); 5465 if (IS_ERR(priv->base)) 5466 return PTR_ERR(priv->base); 5467 5468 if (priv->hw_version == MVPP21) { 5469 priv->lms_base = (void *)devfdt_get_addr_index(dev, 1); 5470 if (IS_ERR(priv->lms_base)) 5471 return PTR_ERR(priv->lms_base); 5472 5473 priv->mdio_base = priv->lms_base + MVPP21_SMI; 5474 } else { 5475 priv->iface_base = (void *)devfdt_get_addr_index(dev, 1); 5476 if (IS_ERR(priv->iface_base)) 5477 return PTR_ERR(priv->iface_base); 5478 5479 priv->mdio_base = priv->iface_base + MVPP22_SMI; 5480 5481 /* Store common base addresses for all ports */ 5482 priv->mpcs_base = 
priv->iface_base + MVPP22_MPCS; 5483 priv->xpcs_base = priv->iface_base + MVPP22_XPCS; 5484 priv->rfu1_base = priv->iface_base + MVPP22_RFU1; 5485 } 5486 5487 if (priv->hw_version == MVPP21) 5488 priv->max_port_rxqs = 8; 5489 else 5490 priv->max_port_rxqs = 32; 5491 5492 /* Finally create and register the MDIO bus driver */ 5493 bus = mdio_alloc(); 5494 if (!bus) { 5495 printf("Failed to allocate MDIO bus\n"); 5496 return -ENOMEM; 5497 } 5498 5499 bus->read = mpp2_mdio_read; 5500 bus->write = mpp2_mdio_write; 5501 snprintf(bus->name, sizeof(bus->name), dev->name); 5502 bus->priv = (void *)priv; 5503 priv->bus = bus; 5504 5505 return mdio_register(bus); 5506 } 5507 5508 static int mvpp2_probe(struct udevice *dev) 5509 { 5510 struct mvpp2_port *port = dev_get_priv(dev); 5511 struct mvpp2 *priv = dev_get_priv(dev->parent); 5512 int err; 5513 5514 /* Only call the probe function for the parent once */ 5515 if (!priv->probe_done) { 5516 err = mvpp2_base_probe(dev->parent); 5517 priv->probe_done = 1; 5518 } 5519 5520 port->priv = dev_get_priv(dev->parent); 5521 5522 err = phy_info_parse(dev, port); 5523 if (err) 5524 return err; 5525 5526 /* 5527 * We need the port specific io base addresses at this stage, since 5528 * gop_port_init() accesses these registers 5529 */ 5530 if (priv->hw_version == MVPP21) { 5531 int priv_common_regs_num = 2; 5532 5533 port->base = (void __iomem *)devfdt_get_addr_index( 5534 dev->parent, priv_common_regs_num + port->id); 5535 if (IS_ERR(port->base)) 5536 return PTR_ERR(port->base); 5537 } else { 5538 port->gop_id = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev), 5539 "gop-port-id", -1); 5540 if (port->id == -1) { 5541 dev_err(&pdev->dev, "missing gop-port-id value\n"); 5542 return -EINVAL; 5543 } 5544 5545 port->base = priv->iface_base + MVPP22_PORT_BASE + 5546 port->gop_id * MVPP22_PORT_OFFSET; 5547 5548 /* Set phy address of the port */ 5549 if(port->phy_node) 5550 mvpp22_smi_phy_addr_cfg(port); 5551 5552 /* GoP Init */ 5553 
		gop_port_init(port);
	}

	/* Initialize network controller */
	err = mvpp2_init(dev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		return err;
	}

	err = mvpp2_port_probe(dev, port, dev_of_offset(dev), priv);
	if (err)
		return err;

	if (priv->hw_version == MVPP22) {
		priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id,
							   port->phy_interface);

		/* Netcomplex configurations for all ports */
		gop_netc_init(priv, MV_NETC_FIRST_PHASE);
		gop_netc_init(priv, MV_NETC_SECOND_PHASE);
	}

	return 0;
}

/*
 * Empty BM pool and stop its activity before the OS is started
 */
static int mvpp2_remove(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = port->priv;
	int i;

	/* Destroy every buffer-manager pool so the OS starts clean */
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);

	return 0;
}

/* UCLASS_ETH operations for one PPv2 port */
static const struct eth_ops mvpp2_ops = {
	.start = mvpp2_start,
	.send = mvpp2_send,
	.recv = mvpp2_recv,
	.stop = mvpp2_stop,
};

/* Per-port driver, bound once per child node by mvpp2_base_bind() */
static struct driver mvpp2_driver = {
	.name = "mvpp2",
	.id = UCLASS_ETH,
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.ops = &mvpp2_ops,
	.priv_auto_alloc_size = sizeof(struct mvpp2_port),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ACTIVE_DMA,
};

/*
 * Use a MISC device to bind the n instances (child nodes) of the
 * network base controller in UCLASS_ETH.
5615 */ 5616 static int mvpp2_base_bind(struct udevice *parent) 5617 { 5618 const void *blob = gd->fdt_blob; 5619 int node = dev_of_offset(parent); 5620 struct uclass_driver *drv; 5621 struct udevice *dev; 5622 struct eth_pdata *plat; 5623 char *name; 5624 int subnode; 5625 u32 id; 5626 int base_id_add; 5627 5628 /* Lookup eth driver */ 5629 drv = lists_uclass_lookup(UCLASS_ETH); 5630 if (!drv) { 5631 puts("Cannot find eth driver\n"); 5632 return -ENOENT; 5633 } 5634 5635 base_id_add = base_id; 5636 5637 fdt_for_each_subnode(subnode, blob, node) { 5638 /* Increment base_id for all subnodes, also the disabled ones */ 5639 base_id++; 5640 5641 /* Skip disabled ports */ 5642 if (!fdtdec_get_is_enabled(blob, subnode)) 5643 continue; 5644 5645 plat = calloc(1, sizeof(*plat)); 5646 if (!plat) 5647 return -ENOMEM; 5648 5649 id = fdtdec_get_int(blob, subnode, "port-id", -1); 5650 id += base_id_add; 5651 5652 name = calloc(1, 16); 5653 sprintf(name, "mvpp2-%d", id); 5654 5655 /* Create child device UCLASS_ETH and bind it */ 5656 device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev); 5657 dev_set_of_offset(dev, subnode); 5658 } 5659 5660 return 0; 5661 } 5662 5663 static const struct udevice_id mvpp2_ids[] = { 5664 { 5665 .compatible = "marvell,armada-375-pp2", 5666 .data = MVPP21, 5667 }, 5668 { 5669 .compatible = "marvell,armada-7k-pp22", 5670 .data = MVPP22, 5671 }, 5672 { } 5673 }; 5674 5675 U_BOOT_DRIVER(mvpp2_base) = { 5676 .name = "mvpp2_base", 5677 .id = UCLASS_MISC, 5678 .of_match = mvpp2_ids, 5679 .bind = mvpp2_base_bind, 5680 .priv_auto_alloc_size = sizeof(struct mvpp2), 5681 }; 5682