1 /* 2 * Driver for Marvell PPv2 network controller for Armada 375 SoC. 3 * 4 * Copyright (C) 2014 Marvell 5 * 6 * Marcin Wojtas <mw@semihalf.com> 7 * 8 * U-Boot version: 9 * Copyright (C) 2016-2017 Stefan Roese <sr@denx.de> 10 * 11 * This file is licensed under the terms of the GNU General Public 12 * License version 2. This program is licensed "as is" without any 13 * warranty of any kind, whether express or implied. 14 */ 15 16 #include <common.h> 17 #include <dm.h> 18 #include <dm/device-internal.h> 19 #include <dm/lists.h> 20 #include <net.h> 21 #include <netdev.h> 22 #include <config.h> 23 #include <malloc.h> 24 #include <asm/io.h> 25 #include <linux/errno.h> 26 #include <phy.h> 27 #include <miiphy.h> 28 #include <watchdog.h> 29 #include <asm/arch/cpu.h> 30 #include <asm/arch/soc.h> 31 #include <linux/compat.h> 32 #include <linux/mbus.h> 33 #include <asm-generic/gpio.h> 34 #include <fdt_support.h> 35 36 DECLARE_GLOBAL_DATA_PTR; 37 38 /* Some linux -> U-Boot compatibility stuff */ 39 #define netdev_err(dev, fmt, args...) \ 40 printf(fmt, ##args) 41 #define netdev_warn(dev, fmt, args...) \ 42 printf(fmt, ##args) 43 #define netdev_info(dev, fmt, args...) \ 44 printf(fmt, ##args) 45 #define netdev_dbg(dev, fmt, args...) 
\ 46 printf(fmt, ##args) 47 48 #define ETH_ALEN 6 /* Octets in one ethernet addr */ 49 50 #define __verify_pcpu_ptr(ptr) \ 51 do { \ 52 const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \ 53 (void)__vpp_verify; \ 54 } while (0) 55 56 #define VERIFY_PERCPU_PTR(__p) \ 57 ({ \ 58 __verify_pcpu_ptr(__p); \ 59 (typeof(*(__p)) __kernel __force *)(__p); \ 60 }) 61 62 #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); }) 63 #define smp_processor_id() 0 64 #define num_present_cpus() 1 65 #define for_each_present_cpu(cpu) \ 66 for ((cpu) = 0; (cpu) < 1; (cpu)++) 67 68 #define NET_SKB_PAD max(32, MVPP2_CPU_D_CACHE_LINE_SIZE) 69 70 #define CONFIG_NR_CPUS 1 71 #define ETH_HLEN ETHER_HDR_SIZE /* Total octets in header */ 72 73 /* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */ 74 #define WRAP (2 + ETH_HLEN + 4 + 32) 75 #define MTU 1500 76 #define RX_BUFFER_SIZE (ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN)) 77 78 #define MVPP2_SMI_TIMEOUT 10000 79 80 /* RX Fifo Registers */ 81 #define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) 82 #define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port)) 83 #define MVPP2_RX_MIN_PKT_SIZE_REG 0x60 84 #define MVPP2_RX_FIFO_INIT_REG 0x64 85 86 /* RX DMA Top Registers */ 87 #define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port)) 88 #define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16) 89 #define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31) 90 #define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool)) 91 #define MVPP2_POOL_BUF_SIZE_OFFSET 5 92 #define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq)) 93 #define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff 94 #define MVPP2_SNOOP_BUF_HDR_MASK BIT(9) 95 #define MVPP2_RXQ_POOL_SHORT_OFFS 20 96 #define MVPP21_RXQ_POOL_SHORT_MASK 0x700000 97 #define MVPP22_RXQ_POOL_SHORT_MASK 0xf00000 98 #define MVPP2_RXQ_POOL_LONG_OFFS 24 99 #define MVPP21_RXQ_POOL_LONG_MASK 0x7000000 100 #define MVPP22_RXQ_POOL_LONG_MASK 0xf000000 101 #define MVPP2_RXQ_PACKET_OFFSET_OFFS 28 102 #define 
MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 103 #define MVPP2_RXQ_DISABLE_MASK BIT(31) 104 105 /* Parser Registers */ 106 #define MVPP2_PRS_INIT_LOOKUP_REG 0x1000 107 #define MVPP2_PRS_PORT_LU_MAX 0xf 108 #define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4)) 109 #define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4)) 110 #define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4)) 111 #define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8)) 112 #define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8)) 113 #define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4)) 114 #define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8)) 115 #define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8)) 116 #define MVPP2_PRS_TCAM_IDX_REG 0x1100 117 #define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4) 118 #define MVPP2_PRS_TCAM_INV_MASK BIT(31) 119 #define MVPP2_PRS_SRAM_IDX_REG 0x1200 120 #define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4) 121 #define MVPP2_PRS_TCAM_CTRL_REG 0x1230 122 #define MVPP2_PRS_TCAM_EN_MASK BIT(0) 123 124 /* Classifier Registers */ 125 #define MVPP2_CLS_MODE_REG 0x1800 126 #define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0) 127 #define MVPP2_CLS_PORT_WAY_REG 0x1810 128 #define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port)) 129 #define MVPP2_CLS_LKP_INDEX_REG 0x1814 130 #define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6 131 #define MVPP2_CLS_LKP_TBL_REG 0x1818 132 #define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff 133 #define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25) 134 #define MVPP2_CLS_FLOW_INDEX_REG 0x1820 135 #define MVPP2_CLS_FLOW_TBL0_REG 0x1824 136 #define MVPP2_CLS_FLOW_TBL1_REG 0x1828 137 #define MVPP2_CLS_FLOW_TBL2_REG 0x182c 138 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4)) 139 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3 140 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7 141 #define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4)) 142 #define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0 143 #define 
MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port)) 144 145 /* Descriptor Manager Top Registers */ 146 #define MVPP2_RXQ_NUM_REG 0x2040 147 #define MVPP2_RXQ_DESC_ADDR_REG 0x2044 148 #define MVPP22_DESC_ADDR_OFFS 8 149 #define MVPP2_RXQ_DESC_SIZE_REG 0x2048 150 #define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0 151 #define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq)) 152 #define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0 153 #define MVPP2_RXQ_NUM_NEW_OFFSET 16 154 #define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq)) 155 #define MVPP2_RXQ_OCCUPIED_MASK 0x3fff 156 #define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16 157 #define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000 158 #define MVPP2_RXQ_THRESH_REG 0x204c 159 #define MVPP2_OCCUPIED_THRESH_OFFSET 0 160 #define MVPP2_OCCUPIED_THRESH_MASK 0x3fff 161 #define MVPP2_RXQ_INDEX_REG 0x2050 162 #define MVPP2_TXQ_NUM_REG 0x2080 163 #define MVPP2_TXQ_DESC_ADDR_REG 0x2084 164 #define MVPP2_TXQ_DESC_SIZE_REG 0x2088 165 #define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0 166 #define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090 167 #define MVPP2_TXQ_THRESH_REG 0x2094 168 #define MVPP2_TRANSMITTED_THRESH_OFFSET 16 169 #define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000 170 #define MVPP2_TXQ_INDEX_REG 0x2098 171 #define MVPP2_TXQ_PREF_BUF_REG 0x209c 172 #define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff) 173 #define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13)) 174 #define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14)) 175 #define MVPP2_PREF_BUF_THRESH(val) ((val) << 17) 176 #define MVPP2_TXQ_DRAIN_EN_MASK BIT(31) 177 #define MVPP2_TXQ_PENDING_REG 0x20a0 178 #define MVPP2_TXQ_PENDING_MASK 0x3fff 179 #define MVPP2_TXQ_INT_STATUS_REG 0x20a4 180 #define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq)) 181 #define MVPP2_TRANSMITTED_COUNT_OFFSET 16 182 #define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000 183 #define MVPP2_TXQ_RSVD_REQ_REG 0x20b0 184 #define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16 185 #define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4 186 #define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff 187 #define MVPP2_TXQ_RSVD_CLR_REG 
0x20b8 188 #define MVPP2_TXQ_RSVD_CLR_OFFSET 16 189 #define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu)) 190 #define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS 8 191 #define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu)) 192 #define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0 193 #define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu)) 194 #define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff 195 #define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu)) 196 197 /* MBUS bridge registers */ 198 #define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2)) 199 #define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2)) 200 #define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2)) 201 #define MVPP2_BASE_ADDR_ENABLE 0x4060 202 203 /* AXI Bridge Registers */ 204 #define MVPP22_AXI_BM_WR_ATTR_REG 0x4100 205 #define MVPP22_AXI_BM_RD_ATTR_REG 0x4104 206 #define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG 0x4110 207 #define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG 0x4114 208 #define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG 0x4118 209 #define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG 0x411c 210 #define MVPP22_AXI_RX_DATA_WR_ATTR_REG 0x4120 211 #define MVPP22_AXI_TX_DATA_RD_ATTR_REG 0x4130 212 #define MVPP22_AXI_RD_NORMAL_CODE_REG 0x4150 213 #define MVPP22_AXI_RD_SNOOP_CODE_REG 0x4154 214 #define MVPP22_AXI_WR_NORMAL_CODE_REG 0x4160 215 #define MVPP22_AXI_WR_SNOOP_CODE_REG 0x4164 216 217 /* Values for AXI Bridge registers */ 218 #define MVPP22_AXI_ATTR_CACHE_OFFS 0 219 #define MVPP22_AXI_ATTR_DOMAIN_OFFS 12 220 221 #define MVPP22_AXI_CODE_CACHE_OFFS 0 222 #define MVPP22_AXI_CODE_DOMAIN_OFFS 4 223 224 #define MVPP22_AXI_CODE_CACHE_NON_CACHE 0x3 225 #define MVPP22_AXI_CODE_CACHE_WR_CACHE 0x7 226 #define MVPP22_AXI_CODE_CACHE_RD_CACHE 0xb 227 228 #define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 2 229 #define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3 230 231 /* Interrupt Cause and Mask registers */ 232 #define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) 233 #define MVPP21_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq)) 234 235 #define MVPP22_ISR_RXQ_GROUP_INDEX_REG 
0x5400 236 #define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf 237 #define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 238 #define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7 239 240 #define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf 241 #define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 242 243 #define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404 244 #define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f 245 #define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00 246 #define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8 247 248 #define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) 249 #define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) 250 #define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) 251 #define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) 252 #define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff 253 #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 254 #define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) 255 #define MVPP2_CAUSE_FCS_ERR_MASK BIT(25) 256 #define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26) 257 #define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29) 258 #define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30) 259 #define MVPP2_CAUSE_MISC_SUM_MASK BIT(31) 260 #define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port)) 261 #define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc 262 #define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff 263 #define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000 264 #define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31) 265 #define MVPP2_ISR_MISC_CAUSE_REG 0x55b0 266 267 /* Buffer Manager registers */ 268 #define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4)) 269 #define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80 270 #define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4)) 271 #define MVPP2_BM_POOL_SIZE_MASK 0xfff0 272 #define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4)) 273 #define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0 274 #define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4)) 275 #define 
MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0 276 #define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4)) 277 #define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4)) 278 #define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff 279 #define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16) 280 #define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4)) 281 #define MVPP2_BM_START_MASK BIT(0) 282 #define MVPP2_BM_STOP_MASK BIT(1) 283 #define MVPP2_BM_STATE_MASK BIT(4) 284 #define MVPP2_BM_LOW_THRESH_OFFS 8 285 #define MVPP2_BM_LOW_THRESH_MASK 0x7f00 286 #define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \ 287 MVPP2_BM_LOW_THRESH_OFFS) 288 #define MVPP2_BM_HIGH_THRESH_OFFS 16 289 #define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000 290 #define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \ 291 MVPP2_BM_HIGH_THRESH_OFFS) 292 #define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4)) 293 #define MVPP2_BM_RELEASED_DELAY_MASK BIT(0) 294 #define MVPP2_BM_ALLOC_FAILED_MASK BIT(1) 295 #define MVPP2_BM_BPPE_EMPTY_MASK BIT(2) 296 #define MVPP2_BM_BPPE_FULL_MASK BIT(3) 297 #define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4) 298 #define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4)) 299 #define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4)) 300 #define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0) 301 #define MVPP2_BM_VIRT_ALLOC_REG 0x6440 302 #define MVPP2_BM_ADDR_HIGH_ALLOC 0x6444 303 #define MVPP2_BM_ADDR_HIGH_PHYS_MASK 0xff 304 #define MVPP2_BM_ADDR_HIGH_VIRT_MASK 0xff00 305 #define MVPP2_BM_ADDR_HIGH_VIRT_SHIFT 8 306 #define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4)) 307 #define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0) 308 #define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1) 309 #define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2) 310 #define MVPP2_BM_VIRT_RLS_REG 0x64c0 311 #define MVPP21_BM_MC_RLS_REG 0x64c4 312 #define MVPP2_BM_MC_ID_MASK 0xfff 313 #define MVPP2_BM_FORCE_RELEASE_MASK BIT(12) 314 #define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4 315 #define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff 316 #define 
MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 317 #define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8 318 #define MVPP22_BM_MC_RLS_REG 0x64d4 319 #define MVPP22_BM_POOL_BASE_HIGH_REG 0x6310 320 #define MVPP22_BM_POOL_BASE_HIGH_MASK 0xff 321 322 /* TX Scheduler registers */ 323 #define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 324 #define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004 325 #define MVPP2_TXP_SCHED_ENQ_MASK 0xff 326 #define MVPP2_TXP_SCHED_DISQ_OFFSET 8 327 #define MVPP2_TXP_SCHED_CMD_1_REG 0x8010 328 #define MVPP2_TXP_SCHED_PERIOD_REG 0x8018 329 #define MVPP2_TXP_SCHED_MTU_REG 0x801c 330 #define MVPP2_TXP_MTU_MAX 0x7FFFF 331 #define MVPP2_TXP_SCHED_REFILL_REG 0x8020 332 #define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff 333 #define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000 334 #define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20) 335 #define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024 336 #define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff 337 #define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2)) 338 #define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff 339 #define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000 340 #define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20) 341 #define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2)) 342 #define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff 343 #define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2)) 344 #define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff 345 346 /* TX general registers */ 347 #define MVPP2_TX_SNOOP_REG 0x8800 348 #define MVPP2_TX_PORT_FLUSH_REG 0x8810 349 #define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port)) 350 351 /* LMS registers */ 352 #define MVPP2_SRC_ADDR_MIDDLE 0x24 353 #define MVPP2_SRC_ADDR_HIGH 0x28 354 #define MVPP2_PHY_AN_CFG0_REG 0x34 355 #define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7) 356 #define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c 357 #define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 358 359 /* Per-port registers */ 360 #define MVPP2_GMAC_CTRL_0_REG 0x0 361 #define MVPP2_GMAC_PORT_EN_MASK BIT(0) 362 #define MVPP2_GMAC_PORT_TYPE_MASK 
BIT(1) 363 #define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 364 #define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc 365 #define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) 366 #define MVPP2_GMAC_CTRL_1_REG 0x4 367 #define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1) 368 #define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5) 369 #define MVPP2_GMAC_PCS_LB_EN_BIT 6 370 #define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6) 371 #define MVPP2_GMAC_SA_LOW_OFFS 7 372 #define MVPP2_GMAC_CTRL_2_REG 0x8 373 #define MVPP2_GMAC_INBAND_AN_MASK BIT(0) 374 #define MVPP2_GMAC_SGMII_MODE_MASK BIT(0) 375 #define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) 376 #define MVPP2_GMAC_PORT_RGMII_MASK BIT(4) 377 #define MVPP2_GMAC_PORT_DIS_PADING_MASK BIT(5) 378 #define MVPP2_GMAC_PORT_RESET_MASK BIT(6) 379 #define MVPP2_GMAC_CLK_125_BYPS_EN_MASK BIT(9) 380 #define MVPP2_GMAC_AUTONEG_CONFIG 0xc 381 #define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) 382 #define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) 383 #define MVPP2_GMAC_EN_PCS_AN BIT(2) 384 #define MVPP2_GMAC_AN_BYPASS_EN BIT(3) 385 #define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) 386 #define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) 387 #define MVPP2_GMAC_AN_SPEED_EN BIT(7) 388 #define MVPP2_GMAC_FC_ADV_EN BIT(9) 389 #define MVPP2_GMAC_EN_FC_AN BIT(11) 390 #define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) 391 #define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) 392 #define MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG BIT(15) 393 #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c 394 #define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 395 #define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 396 #define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ 397 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) 398 #define MVPP2_GMAC_CTRL_4_REG 0x90 399 #define MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK BIT(0) 400 #define MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK BIT(5) 401 #define MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK BIT(6) 402 #define MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK BIT(7) 403 404 /* 405 * Per-port XGMAC registers. PPv2.2 only, only for GOP port 0, 406 * relative to port->base. 
407 */ 408 409 /* Port Mac Control0 */ 410 #define MVPP22_XLG_CTRL0_REG 0x100 411 #define MVPP22_XLG_PORT_EN BIT(0) 412 #define MVPP22_XLG_MAC_RESETN BIT(1) 413 #define MVPP22_XLG_RX_FC_EN BIT(7) 414 #define MVPP22_XLG_MIBCNT_DIS BIT(13) 415 /* Port Mac Control1 */ 416 #define MVPP22_XLG_CTRL1_REG 0x104 417 #define MVPP22_XLG_MAX_RX_SIZE_OFFS 0 418 #define MVPP22_XLG_MAX_RX_SIZE_MASK 0x1fff 419 /* Port Interrupt Mask */ 420 #define MVPP22_XLG_INTERRUPT_MASK_REG 0x118 421 #define MVPP22_XLG_INTERRUPT_LINK_CHANGE BIT(1) 422 /* Port Mac Control3 */ 423 #define MVPP22_XLG_CTRL3_REG 0x11c 424 #define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13) 425 #define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) 426 #define MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC (1 << 13) 427 /* Port Mac Control4 */ 428 #define MVPP22_XLG_CTRL4_REG 0x184 429 #define MVPP22_XLG_FORWARD_802_3X_FC_EN BIT(5) 430 #define MVPP22_XLG_FORWARD_PFC_EN BIT(6) 431 #define MVPP22_XLG_MODE_DMA_1G BIT(12) 432 #define MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK BIT(14) 433 434 /* XPCS registers */ 435 436 /* Global Configuration 0 */ 437 #define MVPP22_XPCS_GLOBAL_CFG_0_REG 0x0 438 #define MVPP22_XPCS_PCSRESET BIT(0) 439 #define MVPP22_XPCS_PCSMODE_OFFS 3 440 #define MVPP22_XPCS_PCSMODE_MASK (0x3 << \ 441 MVPP22_XPCS_PCSMODE_OFFS) 442 #define MVPP22_XPCS_LANEACTIVE_OFFS 5 443 #define MVPP22_XPCS_LANEACTIVE_MASK (0x3 << \ 444 MVPP22_XPCS_LANEACTIVE_OFFS) 445 446 /* MPCS registers */ 447 448 #define PCS40G_COMMON_CONTROL 0x14 449 #define FORWARD_ERROR_CORRECTION_MASK BIT(10) 450 451 #define PCS_CLOCK_RESET 0x14c 452 #define TX_SD_CLK_RESET_MASK BIT(0) 453 #define RX_SD_CLK_RESET_MASK BIT(1) 454 #define MAC_CLK_RESET_MASK BIT(2) 455 #define CLK_DIVISION_RATIO_OFFS 4 456 #define CLK_DIVISION_RATIO_MASK (0x7 << CLK_DIVISION_RATIO_OFFS) 457 #define CLK_DIV_PHASE_SET_MASK BIT(11) 458 459 /* System Soft Reset 1 */ 460 #define GOP_SOFT_RESET_1_REG 0x108 461 #define NETC_GOP_SOFT_RESET_OFFS 6 462 #define NETC_GOP_SOFT_RESET_MASK 
(0x1 << \ 463 NETC_GOP_SOFT_RESET_OFFS) 464 465 /* Ports Control 0 */ 466 #define NETCOMP_PORTS_CONTROL_0_REG 0x110 467 #define NETC_BUS_WIDTH_SELECT_OFFS 1 468 #define NETC_BUS_WIDTH_SELECT_MASK (0x1 << \ 469 NETC_BUS_WIDTH_SELECT_OFFS) 470 #define NETC_GIG_RX_DATA_SAMPLE_OFFS 29 471 #define NETC_GIG_RX_DATA_SAMPLE_MASK (0x1 << \ 472 NETC_GIG_RX_DATA_SAMPLE_OFFS) 473 #define NETC_CLK_DIV_PHASE_OFFS 31 474 #define NETC_CLK_DIV_PHASE_MASK (0x1 << NETC_CLK_DIV_PHASE_OFFS) 475 /* Ports Control 1 */ 476 #define NETCOMP_PORTS_CONTROL_1_REG 0x114 477 #define NETC_PORTS_ACTIVE_OFFSET(p) (0 + p) 478 #define NETC_PORTS_ACTIVE_MASK(p) (0x1 << \ 479 NETC_PORTS_ACTIVE_OFFSET(p)) 480 #define NETC_PORT_GIG_RF_RESET_OFFS(p) (28 + p) 481 #define NETC_PORT_GIG_RF_RESET_MASK(p) (0x1 << \ 482 NETC_PORT_GIG_RF_RESET_OFFS(p)) 483 #define NETCOMP_CONTROL_0_REG 0x120 484 #define NETC_GBE_PORT0_SGMII_MODE_OFFS 0 485 #define NETC_GBE_PORT0_SGMII_MODE_MASK (0x1 << \ 486 NETC_GBE_PORT0_SGMII_MODE_OFFS) 487 #define NETC_GBE_PORT1_SGMII_MODE_OFFS 1 488 #define NETC_GBE_PORT1_SGMII_MODE_MASK (0x1 << \ 489 NETC_GBE_PORT1_SGMII_MODE_OFFS) 490 #define NETC_GBE_PORT1_MII_MODE_OFFS 2 491 #define NETC_GBE_PORT1_MII_MODE_MASK (0x1 << \ 492 NETC_GBE_PORT1_MII_MODE_OFFS) 493 494 #define MVPP22_SMI_MISC_CFG_REG (MVPP22_SMI + 0x04) 495 #define MVPP22_SMI_POLLING_EN BIT(10) 496 497 #define MVPP22_SMI_PHY_ADDR_REG(port) (MVPP22_SMI + 0x04 + \ 498 (0x4 * (port))) 499 500 #define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff 501 502 /* Descriptor ring Macros */ 503 #define MVPP2_QUEUE_NEXT_DESC(q, index) \ 504 (((index) < (q)->last_desc) ? 
((index) + 1) : 0) 505 506 /* SMI: 0xc0054 -> offset 0x54 to lms_base */ 507 #define MVPP21_SMI 0x0054 508 /* PP2.2: SMI: 0x12a200 -> offset 0x1200 to iface_base */ 509 #define MVPP22_SMI 0x1200 510 #define MVPP2_PHY_REG_MASK 0x1f 511 /* SMI register fields */ 512 #define MVPP2_SMI_DATA_OFFS 0 /* Data */ 513 #define MVPP2_SMI_DATA_MASK (0xffff << MVPP2_SMI_DATA_OFFS) 514 #define MVPP2_SMI_DEV_ADDR_OFFS 16 /* PHY device address */ 515 #define MVPP2_SMI_REG_ADDR_OFFS 21 /* PHY device reg addr*/ 516 #define MVPP2_SMI_OPCODE_OFFS 26 /* Write/Read opcode */ 517 #define MVPP2_SMI_OPCODE_READ (1 << MVPP2_SMI_OPCODE_OFFS) 518 #define MVPP2_SMI_READ_VALID (1 << 27) /* Read Valid */ 519 #define MVPP2_SMI_BUSY (1 << 28) /* Busy */ 520 521 #define MVPP2_PHY_ADDR_MASK 0x1f 522 #define MVPP2_PHY_REG_MASK 0x1f 523 524 /* Additional PPv2.2 offsets */ 525 #define MVPP22_MPCS 0x007000 526 #define MVPP22_XPCS 0x007400 527 #define MVPP22_PORT_BASE 0x007e00 528 #define MVPP22_PORT_OFFSET 0x001000 529 #define MVPP22_RFU1 0x318000 530 531 /* Maximum number of ports */ 532 #define MVPP22_GOP_MAC_NUM 4 533 534 /* Sets the field located at the specified in data */ 535 #define MVPP2_RGMII_TX_FIFO_MIN_TH 0x41 536 #define MVPP2_SGMII_TX_FIFO_MIN_TH 0x5 537 #define MVPP2_SGMII2_5_TX_FIFO_MIN_TH 0xb 538 539 /* Net Complex */ 540 enum mv_netc_topology { 541 MV_NETC_GE_MAC2_SGMII = BIT(0), 542 MV_NETC_GE_MAC3_SGMII = BIT(1), 543 MV_NETC_GE_MAC3_RGMII = BIT(2), 544 }; 545 546 enum mv_netc_phase { 547 MV_NETC_FIRST_PHASE, 548 MV_NETC_SECOND_PHASE, 549 }; 550 551 enum mv_netc_sgmii_xmi_mode { 552 MV_NETC_GBE_SGMII, 553 MV_NETC_GBE_XMII, 554 }; 555 556 enum mv_netc_mii_mode { 557 MV_NETC_GBE_RGMII, 558 MV_NETC_GBE_MII, 559 }; 560 561 enum mv_netc_lanes { 562 MV_NETC_LANE_23, 563 MV_NETC_LANE_45, 564 }; 565 566 /* Various constants */ 567 568 /* Coalescing */ 569 #define MVPP2_TXDONE_COAL_PKTS_THRESH 15 570 #define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL 571 #define MVPP2_RX_COAL_PKTS 32 572 #define 
MVPP2_RX_COAL_USEC 100 573 574 /* The two bytes Marvell header. Either contains a special value used 575 * by Marvell switches when a specific hardware mode is enabled (not 576 * supported by this driver) or is filled automatically by zeroes on 577 * the RX side. Those two bytes being at the front of the Ethernet 578 * header, they allow to have the IP header aligned on a 4 bytes 579 * boundary automatically: the hardware skips those two bytes on its 580 * own. 581 */ 582 #define MVPP2_MH_SIZE 2 583 #define MVPP2_ETH_TYPE_LEN 2 584 #define MVPP2_PPPOE_HDR_SIZE 8 585 #define MVPP2_VLAN_TAG_LEN 4 586 587 /* Lbtd 802.3 type */ 588 #define MVPP2_IP_LBDT_TYPE 0xfffa 589 590 #define MVPP2_CPU_D_CACHE_LINE_SIZE 32 591 #define MVPP2_TX_CSUM_MAX_SIZE 9800 592 593 /* Timeout constants */ 594 #define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000 595 #define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000 596 597 #define MVPP2_TX_MTU_MAX 0x7ffff 598 599 /* Maximum number of T-CONTs of PON port */ 600 #define MVPP2_MAX_TCONT 16 601 602 /* Maximum number of supported ports */ 603 #define MVPP2_MAX_PORTS 4 604 605 /* Maximum number of TXQs used by single port */ 606 #define MVPP2_MAX_TXQ 8 607 608 /* Default number of TXQs in use */ 609 #define MVPP2_DEFAULT_TXQ 1 610 611 /* Dfault number of RXQs in use */ 612 #define MVPP2_DEFAULT_RXQ 1 613 #define CONFIG_MV_ETH_RXQ 8 /* increment by 8 */ 614 615 /* Max number of Rx descriptors */ 616 #define MVPP2_MAX_RXD 16 617 618 /* Max number of Tx descriptors */ 619 #define MVPP2_MAX_TXD 16 620 621 /* Amount of Tx descriptors that can be reserved at once by CPU */ 622 #define MVPP2_CPU_DESC_CHUNK 16 623 624 /* Max number of Tx descriptors in each aggregated queue */ 625 #define MVPP2_AGGR_TXQ_SIZE 16 626 627 /* Descriptor aligned size */ 628 #define MVPP2_DESC_ALIGNED_SIZE 32 629 630 /* Descriptor alignment mask */ 631 #define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1) 632 633 /* RX FIFO constants */ 634 #define MVPP21_RX_FIFO_PORT_DATA_SIZE 0x2000 635 
#define MVPP21_RX_FIFO_PORT_ATTR_SIZE 0x80 636 #define MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE 0x8000 637 #define MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE 0x2000 638 #define MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE 0x1000 639 #define MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE 0x200 640 #define MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE 0x80 641 #define MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE 0x40 642 #define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 643 644 /* TX general registers */ 645 #define MVPP22_TX_FIFO_SIZE_REG(eth_tx_port) (0x8860 + ((eth_tx_port) << 2)) 646 #define MVPP22_TX_FIFO_SIZE_MASK 0xf 647 648 /* TX FIFO constants */ 649 #define MVPP2_TX_FIFO_DATA_SIZE_10KB 0xa 650 #define MVPP2_TX_FIFO_DATA_SIZE_3KB 0x3 651 652 /* RX buffer constants */ 653 #define MVPP2_SKB_SHINFO_SIZE \ 654 0 655 656 #define MVPP2_RX_PKT_SIZE(mtu) \ 657 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ 658 ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE) 659 660 #define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) 661 #define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) 662 #define MVPP2_RX_MAX_PKT_SIZE(total_size) \ 663 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) 664 665 #define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) 666 667 /* IPv6 max L3 address size */ 668 #define MVPP2_MAX_L3_ADDR_SIZE 16 669 670 /* Port flags */ 671 #define MVPP2_F_LOOPBACK BIT(0) 672 673 /* Marvell tag types */ 674 enum mvpp2_tag_type { 675 MVPP2_TAG_TYPE_NONE = 0, 676 MVPP2_TAG_TYPE_MH = 1, 677 MVPP2_TAG_TYPE_DSA = 2, 678 MVPP2_TAG_TYPE_EDSA = 3, 679 MVPP2_TAG_TYPE_VLAN = 4, 680 MVPP2_TAG_TYPE_LAST = 5 681 }; 682 683 /* Parser constants */ 684 #define MVPP2_PRS_TCAM_SRAM_SIZE 256 685 #define MVPP2_PRS_TCAM_WORDS 6 686 #define MVPP2_PRS_SRAM_WORDS 4 687 #define MVPP2_PRS_FLOW_ID_SIZE 64 688 #define MVPP2_PRS_FLOW_ID_MASK 0x3f 689 #define MVPP2_PRS_TCAM_ENTRY_INVALID 1 690 #define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5) 691 #define MVPP2_PRS_IPV4_HEAD 0x40 692 #define MVPP2_PRS_IPV4_HEAD_MASK 0xf0 693 #define 
MVPP2_PRS_IPV4_MC 0xe0 694 #define MVPP2_PRS_IPV4_MC_MASK 0xf0 695 #define MVPP2_PRS_IPV4_BC_MASK 0xff 696 #define MVPP2_PRS_IPV4_IHL 0x5 697 #define MVPP2_PRS_IPV4_IHL_MASK 0xf 698 #define MVPP2_PRS_IPV6_MC 0xff 699 #define MVPP2_PRS_IPV6_MC_MASK 0xff 700 #define MVPP2_PRS_IPV6_HOP_MASK 0xff 701 #define MVPP2_PRS_TCAM_PROTO_MASK 0xff 702 #define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f 703 #define MVPP2_PRS_DBL_VLANS_MAX 100 704 705 /* Tcam structure: 706 * - lookup ID - 4 bits 707 * - port ID - 1 byte 708 * - additional information - 1 byte 709 * - header data - 8 bytes 710 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). 711 */ 712 #define MVPP2_PRS_AI_BITS 8 713 #define MVPP2_PRS_PORT_MASK 0xff 714 #define MVPP2_PRS_LU_MASK 0xf 715 #define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ 716 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) 717 #define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ 718 (((offs) * 2) - ((offs) % 2) + 2) 719 #define MVPP2_PRS_TCAM_AI_BYTE 16 720 #define MVPP2_PRS_TCAM_PORT_BYTE 17 721 #define MVPP2_PRS_TCAM_LU_BYTE 20 722 #define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) 723 #define MVPP2_PRS_TCAM_INV_WORD 5 724 /* Tcam entries ID */ 725 #define MVPP2_PE_DROP_ALL 0 726 #define MVPP2_PE_FIRST_FREE_TID 1 727 #define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31) 728 #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) 729 #define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29) 730 #define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) 731 #define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27) 732 #define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26) 733 #define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19) 734 #define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) 735 #define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) 736 #define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) 737 #define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) 738 #define 
MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) 739 #define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13) 740 #define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12) 741 #define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11) 742 #define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10) 743 #define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9) 744 #define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8) 745 #define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7) 746 #define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6) 747 #define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5) 748 #define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4) 749 #define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3) 750 #define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) 751 #define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1) 752 753 /* Sram structure 754 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0). 
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64

/* BM constants */
#define MVPP2_BM_POOLS_NUM		1
#define MVPP2_BM_LONG_BUF_NUM		16
#define MVPP2_BM_SHORT_BUF_NUM		16
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	0

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;
	void __iomem *iface_base;
	void __iomem *mdio_base;

	/* PPv2.2-only register windows (unused on PPv2.1) */
	void __iomem *mpcs_base;
	void __iomem *xpcs_base;
	void __iomem *rfu1_base;

	u32 netc_config;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	struct mii_dev *bus;

	int probe_done;
	u8 num_ports;
};

/* Per-CPU software counters (single CPU in U-Boot) */
struct mvpp2_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

/* Per-port state */
struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int irq;

	/* Back-pointer to the shared controller state */
	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;

	int pkt_size;

	u32 pending_cause_rx;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	int phy_node;
	int phyaddr;
#ifdef CONFIG_DM_GPIO
	struct gpio_desc phy_reset_gpio;
	struct gpio_desc phy_tx_disable_gpio;
#endif
	int init;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	unsigned int phy_speed;		/* SGMII 1Gbps vs 2.5Gbps */

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	u8 dev_addr[ETH_ALEN];
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC) */
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2; wider fields than PPv2.1 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

/* One parser TCAM entry, addressable as words or bytes */
union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

/* One parser SRAM entry, addressable as words or bytes */
union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

/* Software image of one parser TCAM/SRAM entry pair */
struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

/* Software shadow of a parser entry's bookkeeping state */
struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	unsigned long *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_DEFAULT_TXQ;

static int base_id;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/*
 * U-Boot internal data, mostly uncached buffers for descriptors and data
 */
struct buffer_location {
	struct mvpp2_tx_desc *aggr_tx_descs;
	struct mvpp2_tx_desc *tx_descs;
	struct mvpp2_rx_desc *rx_descs;
	unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
	unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
	int first_rxq;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). driver uses less bd's so use 1MB bdspace.
1292 */ 1293 #define BD_SPACE (1 << 20) 1294 1295 /* Utility/helper methods */ 1296 1297 static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) 1298 { 1299 writel(data, priv->base + offset); 1300 } 1301 1302 static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) 1303 { 1304 return readl(priv->base + offset); 1305 } 1306 1307 static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, 1308 struct mvpp2_tx_desc *tx_desc, 1309 dma_addr_t dma_addr) 1310 { 1311 if (port->priv->hw_version == MVPP21) { 1312 tx_desc->pp21.buf_dma_addr = dma_addr; 1313 } else { 1314 u64 val = (u64)dma_addr; 1315 1316 tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0); 1317 tx_desc->pp22.buf_dma_addr_ptp |= val; 1318 } 1319 } 1320 1321 static void mvpp2_txdesc_size_set(struct mvpp2_port *port, 1322 struct mvpp2_tx_desc *tx_desc, 1323 size_t size) 1324 { 1325 if (port->priv->hw_version == MVPP21) 1326 tx_desc->pp21.data_size = size; 1327 else 1328 tx_desc->pp22.data_size = size; 1329 } 1330 1331 static void mvpp2_txdesc_txq_set(struct mvpp2_port *port, 1332 struct mvpp2_tx_desc *tx_desc, 1333 unsigned int txq) 1334 { 1335 if (port->priv->hw_version == MVPP21) 1336 tx_desc->pp21.phys_txq = txq; 1337 else 1338 tx_desc->pp22.phys_txq = txq; 1339 } 1340 1341 static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port, 1342 struct mvpp2_tx_desc *tx_desc, 1343 unsigned int command) 1344 { 1345 if (port->priv->hw_version == MVPP21) 1346 tx_desc->pp21.command = command; 1347 else 1348 tx_desc->pp22.command = command; 1349 } 1350 1351 static void mvpp2_txdesc_offset_set(struct mvpp2_port *port, 1352 struct mvpp2_tx_desc *tx_desc, 1353 unsigned int offset) 1354 { 1355 if (port->priv->hw_version == MVPP21) 1356 tx_desc->pp21.packet_offset = offset; 1357 else 1358 tx_desc->pp22.packet_offset = offset; 1359 } 1360 1361 static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, 1362 struct mvpp2_rx_desc *rx_desc) 1363 { 1364 if (port->priv->hw_version == MVPP21) 1365 return 
rx_desc->pp21.buf_dma_addr; 1366 else 1367 return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0); 1368 } 1369 1370 static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, 1371 struct mvpp2_rx_desc *rx_desc) 1372 { 1373 if (port->priv->hw_version == MVPP21) 1374 return rx_desc->pp21.buf_cookie; 1375 else 1376 return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0); 1377 } 1378 1379 static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, 1380 struct mvpp2_rx_desc *rx_desc) 1381 { 1382 if (port->priv->hw_version == MVPP21) 1383 return rx_desc->pp21.data_size; 1384 else 1385 return rx_desc->pp22.data_size; 1386 } 1387 1388 static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port, 1389 struct mvpp2_rx_desc *rx_desc) 1390 { 1391 if (port->priv->hw_version == MVPP21) 1392 return rx_desc->pp21.status; 1393 else 1394 return rx_desc->pp22.status; 1395 } 1396 1397 static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) 1398 { 1399 txq_pcpu->txq_get_index++; 1400 if (txq_pcpu->txq_get_index == txq_pcpu->size) 1401 txq_pcpu->txq_get_index = 0; 1402 } 1403 1404 /* Get number of physical egress port */ 1405 static inline int mvpp2_egress_port(struct mvpp2_port *port) 1406 { 1407 return MVPP2_MAX_TCONT + port->id; 1408 } 1409 1410 /* Get number of physical TXQ */ 1411 static inline int mvpp2_txq_phys(int port, int txq) 1412 { 1413 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq; 1414 } 1415 1416 /* Parser configuration routines */ 1417 1418 /* Update parser tcam and sram hw entries */ 1419 static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) 1420 { 1421 int i; 1422 1423 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) 1424 return -EINVAL; 1425 1426 /* Clear entry invalidation bit */ 1427 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; 1428 1429 /* Write tcam index - indirect access */ 1430 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); 1431 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) 1432 
mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]); 1433 1434 /* Write sram index - indirect access */ 1435 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); 1436 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) 1437 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]); 1438 1439 return 0; 1440 } 1441 1442 /* Read tcam entry from hw */ 1443 static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) 1444 { 1445 int i; 1446 1447 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) 1448 return -EINVAL; 1449 1450 /* Write tcam index - indirect access */ 1451 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); 1452 1453 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, 1454 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); 1455 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) 1456 return MVPP2_PRS_TCAM_ENTRY_INVALID; 1457 1458 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) 1459 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); 1460 1461 /* Write sram index - indirect access */ 1462 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); 1463 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) 1464 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); 1465 1466 return 0; 1467 } 1468 1469 /* Invalidate tcam hw entry */ 1470 static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) 1471 { 1472 /* Write index - indirect access */ 1473 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); 1474 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD), 1475 MVPP2_PRS_TCAM_INV_MASK); 1476 } 1477 1478 /* Enable shadow table entry and set its lookup ID */ 1479 static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu) 1480 { 1481 priv->prs_shadow[index].valid = true; 1482 priv->prs_shadow[index].lu = lu; 1483 } 1484 1485 /* Update ri fields in shadow table entry */ 1486 static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, 1487 unsigned int ri, unsigned int ri_mask) 1488 
{ 1489 priv->prs_shadow[index].ri_mask = ri_mask; 1490 priv->prs_shadow[index].ri = ri; 1491 } 1492 1493 /* Update lookup field in tcam sw entry */ 1494 static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) 1495 { 1496 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE); 1497 1498 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu; 1499 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK; 1500 } 1501 1502 /* Update mask for single port in tcam sw entry */ 1503 static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, 1504 unsigned int port, bool add) 1505 { 1506 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); 1507 1508 if (add) 1509 pe->tcam.byte[enable_off] &= ~(1 << port); 1510 else 1511 pe->tcam.byte[enable_off] |= 1 << port; 1512 } 1513 1514 /* Update port map in tcam sw entry */ 1515 static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, 1516 unsigned int ports) 1517 { 1518 unsigned char port_mask = MVPP2_PRS_PORT_MASK; 1519 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); 1520 1521 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0; 1522 pe->tcam.byte[enable_off] &= ~port_mask; 1523 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK; 1524 } 1525 1526 /* Obtain port map from tcam sw entry */ 1527 static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) 1528 { 1529 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); 1530 1531 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK; 1532 } 1533 1534 /* Set byte of data and its enable bits in tcam sw entry */ 1535 static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, 1536 unsigned int offs, unsigned char byte, 1537 unsigned char enable) 1538 { 1539 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte; 1540 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable; 1541 } 1542 1543 /* Get byte of data and its enable bits from tcam sw entry */ 1544 static void 
mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, 1545 unsigned int offs, unsigned char *byte, 1546 unsigned char *enable) 1547 { 1548 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)]; 1549 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)]; 1550 } 1551 1552 /* Set ethertype in tcam sw entry */ 1553 static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset, 1554 unsigned short ethertype) 1555 { 1556 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff); 1557 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff); 1558 } 1559 1560 /* Set bits in sram sw entry */ 1561 static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, 1562 int val) 1563 { 1564 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8)); 1565 } 1566 1567 /* Clear bits in sram sw entry */ 1568 static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num, 1569 int val) 1570 { 1571 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8)); 1572 } 1573 1574 /* Update ri bits in sram sw entry */ 1575 static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, 1576 unsigned int bits, unsigned int mask) 1577 { 1578 unsigned int i; 1579 1580 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { 1581 int ri_off = MVPP2_PRS_SRAM_RI_OFFS; 1582 1583 if (!(mask & BIT(i))) 1584 continue; 1585 1586 if (bits & BIT(i)) 1587 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1); 1588 else 1589 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1); 1590 1591 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1); 1592 } 1593 } 1594 1595 /* Update ai bits in sram sw entry */ 1596 static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, 1597 unsigned int bits, unsigned int mask) 1598 { 1599 unsigned int i; 1600 int ai_off = MVPP2_PRS_SRAM_AI_OFFS; 1601 1602 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { 1603 1604 if (!(mask & BIT(i))) 1605 continue; 1606 1607 if (bits & BIT(i)) 1608 
mvpp2_prs_sram_bits_set(pe, ai_off + i, 1); 1609 else 1610 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1); 1611 1612 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1); 1613 } 1614 } 1615 1616 /* Read ai bits from sram sw entry */ 1617 static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe) 1618 { 1619 u8 bits; 1620 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS); 1621 int ai_en_off = ai_off + 1; 1622 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8; 1623 1624 bits = (pe->sram.byte[ai_off] >> ai_shift) | 1625 (pe->sram.byte[ai_en_off] << (8 - ai_shift)); 1626 1627 return bits; 1628 } 1629 1630 /* In sram sw entry set lookup ID field of the tcam key to be used in the next 1631 * lookup interation 1632 */ 1633 static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, 1634 unsigned int lu) 1635 { 1636 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS; 1637 1638 mvpp2_prs_sram_bits_clear(pe, sram_next_off, 1639 MVPP2_PRS_SRAM_NEXT_LU_MASK); 1640 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu); 1641 } 1642 1643 /* In the sram sw entry set sign and value of the next lookup offset 1644 * and the offset value generated to the classifier 1645 */ 1646 static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, 1647 unsigned int op) 1648 { 1649 /* Set sign */ 1650 if (shift < 0) { 1651 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); 1652 shift = 0 - shift; 1653 } else { 1654 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); 1655 } 1656 1657 /* Set value */ 1658 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] = 1659 (unsigned char)shift; 1660 1661 /* Reset and set operation */ 1662 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, 1663 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK); 1664 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op); 1665 1666 /* Set base offset as current */ 1667 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); 1668 } 1669 1670 /* In the sram 
sw entry set sign and value of the user defined offset 1671 * generated to the classifier 1672 */ 1673 static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, 1674 unsigned int type, int offset, 1675 unsigned int op) 1676 { 1677 /* Set sign */ 1678 if (offset < 0) { 1679 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); 1680 offset = 0 - offset; 1681 } else { 1682 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); 1683 } 1684 1685 /* Set value */ 1686 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS, 1687 MVPP2_PRS_SRAM_UDF_MASK); 1688 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset); 1689 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + 1690 MVPP2_PRS_SRAM_UDF_BITS)] &= 1691 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); 1692 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + 1693 MVPP2_PRS_SRAM_UDF_BITS)] |= 1694 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); 1695 1696 /* Set offset type */ 1697 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, 1698 MVPP2_PRS_SRAM_UDF_TYPE_MASK); 1699 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type); 1700 1701 /* Set offset operation */ 1702 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, 1703 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); 1704 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op); 1705 1706 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + 1707 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &= 1708 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> 1709 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); 1710 1711 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + 1712 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |= 1713 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); 1714 1715 /* Set base offset as current */ 1716 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); 1717 } 1718 1719 /* Find parser flow entry */ 1720 static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int 
flow) 1721 { 1722 struct mvpp2_prs_entry *pe; 1723 int tid; 1724 1725 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 1726 if (!pe) 1727 return NULL; 1728 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); 1729 1730 /* Go through the all entires with MVPP2_PRS_LU_FLOWS */ 1731 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) { 1732 u8 bits; 1733 1734 if (!priv->prs_shadow[tid].valid || 1735 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) 1736 continue; 1737 1738 pe->index = tid; 1739 mvpp2_prs_hw_read(priv, pe); 1740 bits = mvpp2_prs_sram_ai_get(pe); 1741 1742 /* Sram store classification lookup ID in AI bits [5:0] */ 1743 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow) 1744 return pe; 1745 } 1746 kfree(pe); 1747 1748 return NULL; 1749 } 1750 1751 /* Return first free tcam index, seeking from start to end */ 1752 static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start, 1753 unsigned char end) 1754 { 1755 int tid; 1756 1757 if (start > end) 1758 swap(start, end); 1759 1760 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE) 1761 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1; 1762 1763 for (tid = start; tid <= end; tid++) { 1764 if (!priv->prs_shadow[tid].valid) 1765 return tid; 1766 } 1767 1768 return -EINVAL; 1769 } 1770 1771 /* Enable/disable dropping all mac da's */ 1772 static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) 1773 { 1774 struct mvpp2_prs_entry pe; 1775 1776 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { 1777 /* Entry exist - update port only */ 1778 pe.index = MVPP2_PE_DROP_ALL; 1779 mvpp2_prs_hw_read(priv, &pe); 1780 } else { 1781 /* Entry doesn't exist - create new */ 1782 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1783 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); 1784 pe.index = MVPP2_PE_DROP_ALL; 1785 1786 /* Non-promiscuous mode for all ports - DROP unknown packets */ 1787 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, 1788 MVPP2_PRS_RI_DROP_MASK); 1789 1790 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 
1); 1791 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 1792 1793 /* Update shadow table */ 1794 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 1795 1796 /* Mask all ports */ 1797 mvpp2_prs_tcam_port_map_set(&pe, 0); 1798 } 1799 1800 /* Update port mask */ 1801 mvpp2_prs_tcam_port_set(&pe, port, add); 1802 1803 mvpp2_prs_hw_write(priv, &pe); 1804 } 1805 1806 /* Set port to promiscuous mode */ 1807 static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add) 1808 { 1809 struct mvpp2_prs_entry pe; 1810 1811 /* Promiscuous mode - Accept unknown packets */ 1812 1813 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) { 1814 /* Entry exist - update port only */ 1815 pe.index = MVPP2_PE_MAC_PROMISCUOUS; 1816 mvpp2_prs_hw_read(priv, &pe); 1817 } else { 1818 /* Entry doesn't exist - create new */ 1819 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1820 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); 1821 pe.index = MVPP2_PE_MAC_PROMISCUOUS; 1822 1823 /* Continue - set next lookup */ 1824 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); 1825 1826 /* Set result info bits */ 1827 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST, 1828 MVPP2_PRS_RI_L2_CAST_MASK); 1829 1830 /* Shift to ethertype */ 1831 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, 1832 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1833 1834 /* Mask all ports */ 1835 mvpp2_prs_tcam_port_map_set(&pe, 0); 1836 1837 /* Update shadow table */ 1838 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 1839 } 1840 1841 /* Update port mask */ 1842 mvpp2_prs_tcam_port_set(&pe, port, add); 1843 1844 mvpp2_prs_hw_write(priv, &pe); 1845 } 1846 1847 /* Accept multicast */ 1848 static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index, 1849 bool add) 1850 { 1851 struct mvpp2_prs_entry pe; 1852 unsigned char da_mc; 1853 1854 /* Ethernet multicast address first byte is 1855 * 0x01 for IPv4 and 0x33 for IPv6 1856 */ 1857 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 
0x01 : 0x33; 1858 1859 if (priv->prs_shadow[index].valid) { 1860 /* Entry exist - update port only */ 1861 pe.index = index; 1862 mvpp2_prs_hw_read(priv, &pe); 1863 } else { 1864 /* Entry doesn't exist - create new */ 1865 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1866 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); 1867 pe.index = index; 1868 1869 /* Continue - set next lookup */ 1870 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); 1871 1872 /* Set result info bits */ 1873 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST, 1874 MVPP2_PRS_RI_L2_CAST_MASK); 1875 1876 /* Update tcam entry data first byte */ 1877 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff); 1878 1879 /* Shift to ethertype */ 1880 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, 1881 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1882 1883 /* Mask all ports */ 1884 mvpp2_prs_tcam_port_map_set(&pe, 0); 1885 1886 /* Update shadow table */ 1887 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 1888 } 1889 1890 /* Update port mask */ 1891 mvpp2_prs_tcam_port_set(&pe, port, add); 1892 1893 mvpp2_prs_hw_write(priv, &pe); 1894 } 1895 1896 /* Parser per-port initialization */ 1897 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first, 1898 int lu_max, int offset) 1899 { 1900 u32 val; 1901 1902 /* Set lookup ID */ 1903 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG); 1904 val &= ~MVPP2_PRS_PORT_LU_MASK(port); 1905 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first); 1906 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val); 1907 1908 /* Set maximum number of loops for packet received from port */ 1909 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port)); 1910 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port); 1911 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max); 1912 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val); 1913 1914 /* Set initial offset for packet header extraction for the first 1915 * searching loop 1916 */ 1917 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port)); 1918 val &= 
~MVPP2_PRS_INIT_OFF_MASK(port); 1919 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset); 1920 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val); 1921 } 1922 1923 /* Default flow entries initialization for all ports */ 1924 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv) 1925 { 1926 struct mvpp2_prs_entry pe; 1927 int port; 1928 1929 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 1930 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1931 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 1932 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port; 1933 1934 /* Mask all ports */ 1935 mvpp2_prs_tcam_port_map_set(&pe, 0); 1936 1937 /* Set flow ID*/ 1938 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK); 1939 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); 1940 1941 /* Update shadow table and hw entry */ 1942 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); 1943 mvpp2_prs_hw_write(priv, &pe); 1944 } 1945 } 1946 1947 /* Set default entry for Marvell Header field */ 1948 static void mvpp2_prs_mh_init(struct mvpp2 *priv) 1949 { 1950 struct mvpp2_prs_entry pe; 1951 1952 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1953 1954 pe.index = MVPP2_PE_MH_DEFAULT; 1955 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH); 1956 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE, 1957 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1958 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC); 1959 1960 /* Unmask all ports */ 1961 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 1962 1963 /* Update shadow table and hw entry */ 1964 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH); 1965 mvpp2_prs_hw_write(priv, &pe); 1966 } 1967 1968 /* Set default entires (place holder) for promiscuous, non-promiscuous and 1969 * multicast MAC addresses 1970 */ 1971 static void mvpp2_prs_mac_init(struct mvpp2 *priv) 1972 { 1973 struct mvpp2_prs_entry pe; 1974 1975 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1976 1977 /* Non-promiscuous mode for all ports - DROP unknown packets */ 1978 pe.index 
	= MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* place holders only - no ports */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
}

/* Match basic ethertypes
 *
 * Installs one L2 TCAM entry per recognized ethertype (PPPoE, ARP, LBTD,
 * IPv4 with/without options, IPv6) plus a catch-all "unknown ethertype"
 * entry.  Returns 0 on success or a negative TID when the TCAM has no
 * free entry left.
 */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options
	 * Reuses the previous entry: only the IHL match and the result-info
	 * bits are cleared and rewritten below.
	 */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even it's unknown L3 */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}

/* Parser default initialization
 *
 * Clears and invalidates the whole TCAM/SRAM, allocates the software
 * shadow table and installs the default MH/MAC/flow/ethertype entries.
 */
static int mvpp2_prs_default_init(struct udevice *dev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(struct
					mvpp2_prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	return 0;
}

/* Compare MAC DA with tcam entry data
 *
 * Returns true only when both the per-byte mask and the masked address
 * bytes of @pe match @da/@mask for all ETH_ALEN bytes.
 */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}

/* Find tcam entry with matched pair <MAC DA, port>
 *
 * On success the returned entry is heap-allocated (kzalloc) and already
 * loaded from hardware; the caller owns it and must kfree() it.
 * Returns NULL if no entry matches or on allocation failure.
 */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through the all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}
kfree(pe); 2320 2321 return NULL; 2322 } 2323 2324 /* Update parser's mac da entry */ 2325 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port, 2326 const u8 *da, bool add) 2327 { 2328 struct mvpp2_prs_entry *pe; 2329 unsigned int pmap, len, ri; 2330 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 2331 int tid; 2332 2333 /* Scan TCAM and see if entry with this <MAC DA, port> already exist */ 2334 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask, 2335 MVPP2_PRS_UDF_MAC_DEF); 2336 2337 /* No such entry */ 2338 if (!pe) { 2339 if (!add) 2340 return 0; 2341 2342 /* Create new TCAM entry */ 2343 /* Find first range mac entry*/ 2344 for (tid = MVPP2_PE_FIRST_FREE_TID; 2345 tid <= MVPP2_PE_LAST_FREE_TID; tid++) 2346 if (priv->prs_shadow[tid].valid && 2347 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) && 2348 (priv->prs_shadow[tid].udf == 2349 MVPP2_PRS_UDF_MAC_RANGE)) 2350 break; 2351 2352 /* Go through the all entries from first to last */ 2353 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2354 tid - 1); 2355 if (tid < 0) 2356 return tid; 2357 2358 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2359 if (!pe) 2360 return -1; 2361 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 2362 pe->index = tid; 2363 2364 /* Mask all ports */ 2365 mvpp2_prs_tcam_port_map_set(pe, 0); 2366 } 2367 2368 /* Update port mask */ 2369 mvpp2_prs_tcam_port_set(pe, port, add); 2370 2371 /* Invalidate the entry if no ports are left enabled */ 2372 pmap = mvpp2_prs_tcam_port_map_get(pe); 2373 if (pmap == 0) { 2374 if (add) { 2375 kfree(pe); 2376 return -1; 2377 } 2378 mvpp2_prs_hw_inv(priv, pe->index); 2379 priv->prs_shadow[pe->index].valid = false; 2380 kfree(pe); 2381 return 0; 2382 } 2383 2384 /* Continue - set next lookup */ 2385 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA); 2386 2387 /* Set match on DA */ 2388 len = ETH_ALEN; 2389 while (len--) 2390 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff); 2391 2392 /* Set result info bits 
*/ 2393 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK; 2394 2395 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | 2396 MVPP2_PRS_RI_MAC_ME_MASK); 2397 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK | 2398 MVPP2_PRS_RI_MAC_ME_MASK); 2399 2400 /* Shift to ethertype */ 2401 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN, 2402 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2403 2404 /* Update shadow table and hw entry */ 2405 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF; 2406 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC); 2407 mvpp2_prs_hw_write(priv, pe); 2408 2409 kfree(pe); 2410 2411 return 0; 2412 } 2413 2414 static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da) 2415 { 2416 int err; 2417 2418 /* Remove old parser entry */ 2419 err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr, 2420 false); 2421 if (err) 2422 return err; 2423 2424 /* Add new parser entry */ 2425 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true); 2426 if (err) 2427 return err; 2428 2429 /* Set addr in the device */ 2430 memcpy(port->dev_addr, da, ETH_ALEN); 2431 2432 return 0; 2433 } 2434 2435 /* Set prs flow for the port */ 2436 static int mvpp2_prs_def_flow(struct mvpp2_port *port) 2437 { 2438 struct mvpp2_prs_entry *pe; 2439 int tid; 2440 2441 pe = mvpp2_prs_flow_find(port->priv, port->id); 2442 2443 /* Such entry not exist */ 2444 if (!pe) { 2445 /* Go through the all entires from last to first */ 2446 tid = mvpp2_prs_tcam_first_free(port->priv, 2447 MVPP2_PE_LAST_FREE_TID, 2448 MVPP2_PE_FIRST_FREE_TID); 2449 if (tid < 0) 2450 return tid; 2451 2452 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2453 if (!pe) 2454 return -ENOMEM; 2455 2456 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); 2457 pe->index = tid; 2458 2459 /* Set flow ID*/ 2460 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK); 2461 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); 2462 2463 /* Update shadow table */ 2464 
mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS); 2465 } 2466 2467 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id)); 2468 mvpp2_prs_hw_write(port->priv, pe); 2469 kfree(pe); 2470 2471 return 0; 2472 } 2473 2474 /* Classifier configuration routines */ 2475 2476 /* Update classification flow table registers */ 2477 static void mvpp2_cls_flow_write(struct mvpp2 *priv, 2478 struct mvpp2_cls_flow_entry *fe) 2479 { 2480 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); 2481 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); 2482 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); 2483 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); 2484 } 2485 2486 /* Update classification lookup table register */ 2487 static void mvpp2_cls_lookup_write(struct mvpp2 *priv, 2488 struct mvpp2_cls_lookup_entry *le) 2489 { 2490 u32 val; 2491 2492 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid; 2493 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); 2494 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); 2495 } 2496 2497 /* Classifier default initialization */ 2498 static void mvpp2_cls_init(struct mvpp2 *priv) 2499 { 2500 struct mvpp2_cls_lookup_entry le; 2501 struct mvpp2_cls_flow_entry fe; 2502 int index; 2503 2504 /* Enable classifier */ 2505 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); 2506 2507 /* Clear classifier flow table */ 2508 memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS); 2509 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { 2510 fe.index = index; 2511 mvpp2_cls_flow_write(priv, &fe); 2512 } 2513 2514 /* Clear classifier lookup table */ 2515 le.data = 0; 2516 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) { 2517 le.lkpid = index; 2518 le.way = 0; 2519 mvpp2_cls_lookup_write(priv, &le); 2520 2521 le.way = 1; 2522 mvpp2_cls_lookup_write(priv, &le); 2523 } 2524 } 2525 2526 static void mvpp2_cls_port_config(struct mvpp2_port *port) 2527 { 2528 struct mvpp2_cls_lookup_entry le; 2529 
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}

/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	/* Low bits of the oversize RXQ number */
	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	/* Remaining high bits of the RXQ number */
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}

/* Buffer Manager configuration routines */

/* Create pool
 *
 * Uses the statically reserved buffer_loc.bm_pool area rather than a
 * dynamic DMA allocation (U-Boot environment).
 */
static int mvpp2_bm_pool_create(struct udevice *dev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
	bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", 2593 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN); 2594 return -ENOMEM; 2595 } 2596 2597 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), 2598 lower_32_bits(bm_pool->dma_addr)); 2599 if (priv->hw_version == MVPP22) 2600 mvpp2_write(priv, MVPP22_BM_POOL_BASE_HIGH_REG, 2601 (upper_32_bits(bm_pool->dma_addr) & 2602 MVPP22_BM_POOL_BASE_HIGH_MASK)); 2603 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size); 2604 2605 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); 2606 val |= MVPP2_BM_START_MASK; 2607 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); 2608 2609 bm_pool->type = MVPP2_BM_FREE; 2610 bm_pool->size = size; 2611 bm_pool->pkt_size = 0; 2612 bm_pool->buf_num = 0; 2613 2614 return 0; 2615 } 2616 2617 /* Set pool buffer size */ 2618 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, 2619 struct mvpp2_bm_pool *bm_pool, 2620 int buf_size) 2621 { 2622 u32 val; 2623 2624 bm_pool->buf_size = buf_size; 2625 2626 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET); 2627 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val); 2628 } 2629 2630 /* Free all buffers from the pool */ 2631 static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv, 2632 struct mvpp2_bm_pool *bm_pool) 2633 { 2634 int i; 2635 2636 for (i = 0; i < bm_pool->buf_num; i++) { 2637 /* Allocate buffer back from the buffer manager */ 2638 mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); 2639 } 2640 2641 bm_pool->buf_num = 0; 2642 } 2643 2644 /* Cleanup pool */ 2645 static int mvpp2_bm_pool_destroy(struct udevice *dev, 2646 struct mvpp2 *priv, 2647 struct mvpp2_bm_pool *bm_pool) 2648 { 2649 u32 val; 2650 2651 mvpp2_bm_bufs_free(dev, priv, bm_pool); 2652 if (bm_pool->buf_num) { 2653 dev_err(dev, "cannot free all buffers in pool %d\n", bm_pool->id); 2654 return 0; 2655 } 2656 2657 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); 2658 val |= MVPP2_BM_STOP_MASK; 2659 
mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); 2660 2661 return 0; 2662 } 2663 2664 static int mvpp2_bm_pools_init(struct udevice *dev, 2665 struct mvpp2 *priv) 2666 { 2667 int i, err, size; 2668 struct mvpp2_bm_pool *bm_pool; 2669 2670 /* Create all pools with maximum size */ 2671 size = MVPP2_BM_POOL_SIZE_MAX; 2672 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { 2673 bm_pool = &priv->bm_pools[i]; 2674 bm_pool->id = i; 2675 err = mvpp2_bm_pool_create(dev, priv, bm_pool, size); 2676 if (err) 2677 goto err_unroll_pools; 2678 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0); 2679 } 2680 return 0; 2681 2682 err_unroll_pools: 2683 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size); 2684 for (i = i - 1; i >= 0; i--) 2685 mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]); 2686 return err; 2687 } 2688 2689 static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv) 2690 { 2691 int i, err; 2692 2693 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { 2694 /* Mask BM all interrupts */ 2695 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0); 2696 /* Clear BM cause register */ 2697 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0); 2698 } 2699 2700 /* Allocate and initialize BM pools */ 2701 priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM, 2702 sizeof(struct mvpp2_bm_pool), GFP_KERNEL); 2703 if (!priv->bm_pools) 2704 return -ENOMEM; 2705 2706 err = mvpp2_bm_pools_init(dev, priv); 2707 if (err < 0) 2708 return err; 2709 return 0; 2710 } 2711 2712 /* Attach long pool to rxq */ 2713 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, 2714 int lrxq, int long_pool) 2715 { 2716 u32 val, mask; 2717 int prxq; 2718 2719 /* Get queue physical ID */ 2720 prxq = port->rxqs[lrxq]->id; 2721 2722 if (port->priv->hw_version == MVPP21) 2723 mask = MVPP21_RXQ_POOL_LONG_MASK; 2724 else 2725 mask = MVPP22_RXQ_POOL_LONG_MASK; 2726 2727 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 2728 val &= ~mask; 2729 val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) 
		& mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Set pool number in a BM cookie */
static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
{
	u32 bm;

	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);

	return bm;
}

/* Get pool number from a BM cookie */
static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}

/* Release buffer to BM
 *
 * On PPv2.2 the high halves of the 64-bit DMA/phys addresses must be
 * latched in MVPP22_BM_ADDR_HIGH_RLS_REG before the low-word writes.
 */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     unsigned long buf_phys_addr)
{
	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
}

/* Refill BM pool: release one buffer back to the pool encoded in @bm */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
			      dma_addr_t dma_addr,
			      phys_addr_t phys_addr)
{
	int pool = mvpp2_bm_cookie_pool_get(bm);

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}

/* Allocate buffers for the pool
 *
 * Hands the statically reserved buffer_loc.rx_buffer entries to the BM.
 * Returns the number of buffers actually added (0 if the request would
 * overflow the pool).
 */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		mvpp2_bm_pool_put(port, bm_pool->id,
				  (dma_addr_t)buffer_loc.rx_buffer[i],
				  (unsigned long)buffer_loc.rx_buffer[i]);

	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	return i;
}

/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		netdev_err(port->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't being used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size >
	    new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(NULL,
					   port->priv, new_pool);

		new_pool->pkt_size = pkt_size;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			/* NOTE(review): 'dev' is not a local or parameter
			 * here; this presumably compiles only because the
			 * U-Boot dev_err macro discards its first argument
			 * — confirm and pass a real device.
			 */
			dev_err(dev, "pool %d: %d of %d allocated\n",
				new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

/* Initialize pools for swf: attach the per-port long pool to every RXQ */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
					  MVPP2_BM_SWF_LONG,
					  port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	return 0;
}

/* Port configuration routines */

/* Select PCS/RGMII operation in GMAC CTRL_2 from the PHY interface mode */
static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		val |= MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
		/* fall through - RGMII also needs PCS disabled */
	default:
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}

	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}

static void mvpp2_port_fc_adv_enable(struct
				     mvpp2_port *port)
{
	u32 val;

	/* Advertise flow control towards the link partner */
	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

/* Enable the port MAC and its MIB counters */
static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Disable the port MAC */
static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Deassert the port reset and wait for the MAC to come out of reset.
 * NOTE(review): the poll below has no timeout; if the MAC never clears
 * the reset bit this spins forever — consider adding a bound.
 */
static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
		    ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	/* Register field holds the size in units of 2 bytes, minus the MH */
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* PPv2.2 GoP/GMAC config */

/* Set the MAC to reset or exit from reset */
static int gop_gmac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (reset)
		val |= MVPP2_GMAC_PORT_RESET_MASK;
	else
		val &= ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

/*
 * gop_gpcs_mode_cfg
 *
 * Configure port to working with Gig PCS or don't.
 */
static int gop_gpcs_mode_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_PCS_ENABLE_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	/* enable / disable PCS on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

static int gop_bypass_clk_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	else
		val &= ~MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	/* enable / disable 125MHz clock bypass on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

static void gop_gmac_sgmii2_5_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII2_5_TX_FIFO_MIN_TH;
	val = readl(port->base +
MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val |= MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/*
	 * Configure GIG MAC to 1000Base-X mode connected to a fiber
	 * transceiver
	 */
	val |= MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN 0x9268 */
	val = MVPP2_GMAC_EN_PCS_AN |
		MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_CONFIG_MII_SPEED |
		MVPP2_GMAC_CONFIG_GMII_SPEED |
		MVPP2_GMAC_FC_ADV_EN |
		MVPP2_GMAC_CONFIG_FULL_DUPLEX |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

/* Configure the GMAC for 1G SGMII operation */
static void gop_gmac_sgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val
|= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/* configure GIG MAC to SGMII mode (i.e. not 1000Base-X) */
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN */
	val = MVPP2_GMAC_EN_PCS_AN |
		MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_AN_SPEED_EN |
		MVPP2_GMAC_EN_FC_AN |
		MVPP2_GMAC_AN_DUPLEX_EN |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

/* Configure the GMAC for RGMII operation */
static void gop_gmac_rgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_RGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	/* external pins drive the GMII interface in RGMII mode */
	val |= MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/* clear the 1000Base-X port type (same setting as SGMII mode) */
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN 0xb8e8 */
	val = MVPP2_GMAC_AN_BYPASS_EN |
		MVPP2_GMAC_AN_SPEED_EN |
		MVPP2_GMAC_EN_FC_AN |
		MVPP2_GMAC_AN_DUPLEX_EN |
		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

/* Set the internal mux's to the required MAC
in the GOP */
static int gop_gmac_mode_cfg(struct mvpp2_port *port)
{
	u32 val;

	/* Set TX FIFO thresholds */
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		if (port->phy_speed == 2500)
			gop_gmac_sgmii2_5_cfg(port);
		else
			gop_gmac_sgmii_cfg(port);
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		gop_gmac_rgmii_cfg(port);
		break;

	default:
		/* unsupported interface type */
		return -1;
	}

	/* Jumbo frame support - 0x1400*2= 0x2800 bytes */
	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= 0x1400 << MVPP2_GMAC_MAX_RX_SIZE_OFFS;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* PeriodicXonEn disable */
	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	val &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);

	return 0;
}

/* Switch the XLG MAC mux over to the 1G GMAC (MAC0 only) */
static void gop_xlg_2_gig_mac_cfg(struct mvpp2_port *port)
{
	u32 val;

	/* relevant only for MAC0 (XLG0 and GMAC0) */
	if (port->gop_id > 0)
		return;

	/* configure 1Gig MAC mode */
	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
	val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
	writel(val, port->base + MVPP22_XLG_CTRL3_REG);
}

/* Put the gig PCS into (reset=1) or take it out of (reset=0) reset */
static int gop_gpcs_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (reset)
		val &= ~MVPP2_GMAC_SGMII_MODE_MASK;
	else
		val |= MVPP2_GMAC_SGMII_MODE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

/* Set the internal mux's to the required PCS in the PI */
static int gop_xpcs_mode(struct mvpp2_port *port, int num_of_lanes)
{
	u32 val;
	int lane;

	/* Map the lane count to the encoded lane-active field value */
	switch (num_of_lanes) {
	case 1:
		lane = 0;
break;
	case 2:
		lane = 1;
		break;
	case 4:
		lane = 2;
		break;
	default:
		return -1;
	}

	/* configure XG MAC mode */
	val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
	val &= ~MVPP22_XPCS_PCSMODE_MASK;
	val &= ~MVPP22_XPCS_LANEACTIVE_MASK;
	val |= (2 * lane) << MVPP22_XPCS_LANEACTIVE_OFFS;
	writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);

	return 0;
}

/* Configure the MPCS (10G PCS) FEC, clock division and resets */
static int gop_mpcs_mode(struct mvpp2_port *port)
{
	u32 val;

	/* configure PCS40G COMMON CONTROL */
	val = readl(port->priv->mpcs_base + PCS40G_COMMON_CONTROL);
	val &= ~FORWARD_ERROR_CORRECTION_MASK;
	writel(val, port->priv->mpcs_base + PCS40G_COMMON_CONTROL);

	/* configure PCS CLOCK RESET */
	val = readl(port->priv->mpcs_base + PCS_CLOCK_RESET);
	val &= ~CLK_DIVISION_RATIO_MASK;
	val |= 1 << CLK_DIVISION_RATIO_OFFS;
	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);

	/* NOTE(review): 'val' carries over from the previous write without
	 * re-reading the register - presumably intentional, verify.
	 */
	val &= ~CLK_DIV_PHASE_SET_MASK;
	val |= MAC_CLK_RESET_MASK;
	val |= RX_SD_CLK_RESET_MASK;
	val |= TX_SD_CLK_RESET_MASK;
	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);

	return 0;
}

/* Set the internal mux's to the required MAC in the GOP */
static int gop_xlg_mac_mode_cfg(struct mvpp2_port *port, int num_of_act_lanes)
{
	u32 val;

	/* configure 10G MAC mode */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	val |= MVPP22_XLG_RX_FC_EN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
	val |= MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC;
	writel(val, port->base + MVPP22_XLG_CTRL3_REG);

	/* read - modify - write */
	val = readl(port->base + MVPP22_XLG_CTRL4_REG);
	val &= ~MVPP22_XLG_MODE_DMA_1G;
	val |= MVPP22_XLG_FORWARD_PFC_EN;
	val |=
MVPP22_XLG_FORWARD_802_3X_FC_EN;
	val &= ~MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK;
	writel(val, port->base + MVPP22_XLG_CTRL4_REG);

	/* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */
	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_MAX_RX_SIZE_MASK;
	val |= 0x1400 << MVPP22_XLG_MAX_RX_SIZE_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);

	/* unmask link change interrupt */
	val = readl(port->base + MVPP22_XLG_INTERRUPT_MASK_REG);
	val |= MVPP22_XLG_INTERRUPT_LINK_CHANGE;
	val |= 1; /* unmask summary bit */
	writel(val, port->base + MVPP22_XLG_INTERRUPT_MASK_REG);

	return 0;
}

/* Set PCS to reset or exit from reset */
static int gop_xpcs_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write; reset bit is active-low */
	val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
	if (reset)
		val &= ~MVPP22_XPCS_PCSRESET;
	else
		val |= MVPP22_XPCS_PCSRESET;
	writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);

	return 0;
}

/* Set the MAC to reset or exit from reset */
static int gop_xlg_mac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write; RESETN is active-low */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (reset)
		val &= ~MVPP22_XLG_MAC_RESETN;
	else
		val |= MVPP22_XLG_MAC_RESETN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	return 0;
}

/*
 * gop_port_init
 *
 * Init physical port. Configures the port mode and all it's elements
 * accordingly.
 * Does not verify that the selected mode/port number is valid at the
 * core level.
 */
static int gop_port_init(struct mvpp2_port *port)
{
	int mac_num = port->gop_id;
	int num_of_act_lanes;

	if (mac_num >= MVPP22_GOP_MAC_NUM) {
		netdev_err(NULL, "%s: illegal port number %d", __func__,
			   mac_num);
		return -1;
	}

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		gop_gmac_reset(port, 1);

		/* configure PCS */
		gop_gpcs_mode_cfg(port, 0);
		gop_bypass_clk_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* pcs unreset */
		gop_gpcs_reset(port, 0);

		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_SGMII:
		/* configure PCS */
		gop_gpcs_mode_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* select proper Mac mode */
		gop_xlg_2_gig_mac_cfg(port);

		/* pcs unreset */
		gop_gpcs_reset(port, 0);
		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_SFI:
		num_of_act_lanes = 2;
		mac_num = 0;
		/* configure PCS */
		gop_xpcs_mode(port, num_of_act_lanes);
		gop_mpcs_mode(port);
		/* configure MAC */
		gop_xlg_mac_mode_cfg(port, num_of_act_lanes);

		/* pcs unreset */
		gop_xpcs_reset(port, 0);

		/* mac unreset */
		gop_xlg_mac_reset(port, 0);
		break;

	default:
		netdev_err(NULL, "%s: Requested port mode (%d) not supported\n",
			   __func__, port->phy_interface);
		return -1;
	}

	return 0;
}

/* Enable (enable != 0) or disable the XLG MAC port */
static void gop_xlg_mac_port_enable(struct mvpp2_port *port, int enable)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (enable) {
		/* Enable port and MIB counters update */
		val |= MVPP22_XLG_PORT_EN;
		val &= ~MVPP22_XLG_MIBCNT_DIS;
	} else {
		/* Disable port */
		val &= ~MVPP22_XLG_PORT_EN;
	}
writel(val, port->base + MVPP22_XLG_CTRL0_REG);
}

/* Enable/disable the port's MAC according to its interface type */
static void gop_port_enable(struct mvpp2_port *port, int enable)
{
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		if (enable)
			mvpp2_port_enable(port);
		else
			mvpp2_port_disable(port);
		break;

	case PHY_INTERFACE_MODE_SFI:
		gop_xlg_mac_port_enable(port, enable);

		break;
	default:
		netdev_err(NULL, "%s: Wrong port mode (%d)\n", __func__,
			   port->phy_interface);
		return;
	}
}

/* RFU1 functions */
static inline u32 gop_rfu1_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->rfu1_base + offset);
}

static inline void gop_rfu1_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->rfu1_base + offset);
}

/* Build netcomplex config bits for one GoP port (pure function, no I/O) */
static u32 mvpp2_netc_cfg_create(int gop_id, phy_interface_t phy_type)
{
	u32 val = 0;

	if (gop_id == 2) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII)
			val |= MV_NETC_GE_MAC2_SGMII;
	}

	if (gop_id == 3) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII)
			val |= MV_NETC_GE_MAC3_SGMII;
		else if (phy_type == PHY_INTERFACE_MODE_RGMII ||
			 phy_type == PHY_INTERFACE_MODE_RGMII_ID)
			val |= MV_NETC_GE_MAC3_RGMII;
	}

	return val;
}

/* Mark a GoP port active/inactive in the netcomplex control register */
static void gop_netc_active_port(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORTS_ACTIVE_MASK(gop_id));

	val <<= NETC_PORTS_ACTIVE_OFFSET(gop_id);
	val &= NETC_PORTS_ACTIVE_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}

/* Select the MII mode for GbE port 1 (gop_id is currently unused here) */
static void gop_netc_mii_mode(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
reg &= ~NETC_GBE_PORT1_MII_MODE_MASK;

	val <<= NETC_GBE_PORT1_MII_MODE_OFFS;
	val &= NETC_GBE_PORT1_MII_MODE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}

/* Write the GoP soft-reset field (1 de-asserts reset per caller usage) */
static void gop_netc_gop_reset(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, GOP_SOFT_RESET_1_REG);
	reg &= ~NETC_GOP_SOFT_RESET_MASK;

	val <<= NETC_GOP_SOFT_RESET_OFFS;
	val &= NETC_GOP_SOFT_RESET_MASK;

	reg |= val;

	gop_rfu1_write(priv, GOP_SOFT_RESET_1_REG, reg);
}

/* Program the GoP clock-divider phase field */
static void gop_netc_gop_clock_logic_set(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_CLK_DIV_PHASE_MASK;

	val <<= NETC_CLK_DIV_PHASE_OFFS;
	val &= NETC_CLK_DIV_PHASE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

/* Write the per-port gig RF reset field */
static void gop_netc_port_rf_reset(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(gop_id));

	val <<= NETC_PORT_GIG_RF_RESET_OFFS(gop_id);
	val &= NETC_PORT_GIG_RF_RESET_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}

/* Select the SGMII-mode field for GbE port 2 or 3 */
static void gop_netc_gbe_sgmii_mode_select(struct mvpp2 *priv, int gop_id,
					   u32 val)
{
	u32 reg, mask, offset;

	if (gop_id == 2) {
		mask = NETC_GBE_PORT0_SGMII_MODE_MASK;
		offset = NETC_GBE_PORT0_SGMII_MODE_OFFS;
	} else {
		mask = NETC_GBE_PORT1_SGMII_MODE_MASK;
		offset = NETC_GBE_PORT1_SGMII_MODE_OFFS;
	}
	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
	reg &= ~mask;

	val <<= offset;
	val &= mask;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}

/* Select the netcomplex bus width */
static void
gop_netc_bus_width_select(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_BUS_WIDTH_SELECT_MASK;

	val <<= NETC_BUS_WIDTH_SELECT_OFFS;
	val &= NETC_BUS_WIDTH_SELECT_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

/* Program the gig RX data sample-stage timing */
static void gop_netc_sample_stages_timing(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK;

	val <<= NETC_GIG_RX_DATA_SAMPLE_OFFS;
	val &= NETC_GIG_RX_DATA_SAMPLE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

/* Two-phase netcomplex setup for a MAC in xMII/RGMII mode */
static void gop_netc_mac_to_xgmii(struct mvpp2 *priv, int gop_id,
				  enum mv_netc_phase phase)
{
	switch (phase) {
	case MV_NETC_FIRST_PHASE:
		/* Set Bus Width to HB mode = 1 */
		gop_netc_bus_width_select(priv, 1);
		/* Select RGMII mode */
		gop_netc_gbe_sgmii_mode_select(priv, gop_id, MV_NETC_GBE_XMII);
		break;

	case MV_NETC_SECOND_PHASE:
		/* De-assert the relevant port HB reset */
		gop_netc_port_rf_reset(priv, gop_id, 1);
		break;
	}
}

/* Two-phase netcomplex setup for a MAC in SGMII mode */
static void gop_netc_mac_to_sgmii(struct mvpp2 *priv, int gop_id,
				  enum mv_netc_phase phase)
{
	switch (phase) {
	case MV_NETC_FIRST_PHASE:
		/* Set Bus Width to HB mode = 1 */
		gop_netc_bus_width_select(priv, 1);
		/* Select SGMII mode */
		if (gop_id >= 1) {
			gop_netc_gbe_sgmii_mode_select(priv, gop_id,
						       MV_NETC_GBE_SGMII);
		}

		/* Configure the sample stages */
		gop_netc_sample_stages_timing(priv, 0);
		/* Configure the ComPhy Selector */
		/* gop_netc_com_phy_selector_config(netComplex); */
		break;

	case MV_NETC_SECOND_PHASE:
		/* De-assert the relevant port HB reset */
		gop_netc_port_rf_reset(priv, gop_id, 1);
		break;
3673 } 3674 } 3675 3676 static int gop_netc_init(struct mvpp2 *priv, enum mv_netc_phase phase) 3677 { 3678 u32 c = priv->netc_config; 3679 3680 if (c & MV_NETC_GE_MAC2_SGMII) 3681 gop_netc_mac_to_sgmii(priv, 2, phase); 3682 else 3683 gop_netc_mac_to_xgmii(priv, 2, phase); 3684 3685 if (c & MV_NETC_GE_MAC3_SGMII) { 3686 gop_netc_mac_to_sgmii(priv, 3, phase); 3687 } else { 3688 gop_netc_mac_to_xgmii(priv, 3, phase); 3689 if (c & MV_NETC_GE_MAC3_RGMII) 3690 gop_netc_mii_mode(priv, 3, MV_NETC_GBE_RGMII); 3691 else 3692 gop_netc_mii_mode(priv, 3, MV_NETC_GBE_MII); 3693 } 3694 3695 /* Activate gop ports 0, 2, 3 */ 3696 gop_netc_active_port(priv, 0, 1); 3697 gop_netc_active_port(priv, 2, 1); 3698 gop_netc_active_port(priv, 3, 1); 3699 3700 if (phase == MV_NETC_SECOND_PHASE) { 3701 /* Enable the GOP internal clock logic */ 3702 gop_netc_gop_clock_logic_set(priv, 1); 3703 /* De-assert GOP unit reset */ 3704 gop_netc_gop_reset(priv, 1); 3705 } 3706 3707 return 0; 3708 } 3709 3710 /* Set defaults to the MVPP2 port */ 3711 static void mvpp2_defaults_set(struct mvpp2_port *port) 3712 { 3713 int tx_port_num, val, queue, ptxq, lrxq; 3714 3715 if (port->priv->hw_version == MVPP21) { 3716 /* Configure port to loopback if needed */ 3717 if (port->flags & MVPP2_F_LOOPBACK) 3718 mvpp2_port_loopback_set(port); 3719 3720 /* Update TX FIFO MIN Threshold */ 3721 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 3722 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; 3723 /* Min. 
TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	/* Clear the disable bit on every RXQ of the port */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val =
mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Stop packet reception on every RXQ of the port */
static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable transmit via physical egress queue
 * - HW starts take descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate.
 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and
	 * increment the number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	/* Offset is in 32-byte units */
	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Obtain BM cookie information from descriptor */
static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
				 struct mvpp2_rx_desc *rx_desc)
{
	int cpu = smp_processor_id();
	int pool;

	/* Pool id is extracted from the RX descriptor status field */
	pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
		MVPP2_RXD_BM_POOL_ID_MASK) >>
		MVPP2_RXD_BM_POOL_ID_OFFS;

	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
}

/* Tx descriptors helper methods */

/* Get number of Tx descriptors waiting to be transmitted by HW */
static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
				       struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Indirect access: select the TXQ first */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);

	return val & MVPP2_TXQ_PENDING_MASK;
}

/* Get
pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
	       MVPP2_TRANSMITTED_COUNT_OFFSET;
}

/* Clear the sent-descriptor counters of all TXQs (the read clears them) */
static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
	}
}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	/* MTU here is expressed in bits, clamped to the HW maximum */
	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger that MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}

/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	/* U-Boot has no skbs to free - only advance the per-cpu get index */
	for (i = 0; i < num; i++)
		mvpp2_txq_inc_get(txq_pcpu);
}

/* Map an interrupt-cause bitmap to its RX queue (highest set bit) */
static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

/* Map an interrupt-cause bitmap to its TX queue (highest set bit) */
static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}

/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct udevice *dev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = buffer_loc.aggr_tx_descs;
	aggr_txq->descs_dma =
(dma_addr_t)buffer_loc.aggr_tx_descs;
	if (!aggr_txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(aggr_txq->descs !=
	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address indirect
	 * access
	 */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			  MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

	return 0;
}

/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)

{
	u32 rxq_dma;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = buffer_loc.rx_descs;
	rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
	if (!rxq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);

	/* Set Offset */
mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); 4135 4136 /* Add number of descriptors ready for receiving packets */ 4137 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); 4138 4139 return 0; 4140 } 4141 4142 /* Push packets received by the RXQ to BM pool */ 4143 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, 4144 struct mvpp2_rx_queue *rxq) 4145 { 4146 int rx_received, i; 4147 4148 rx_received = mvpp2_rxq_received(port, rxq->id); 4149 if (!rx_received) 4150 return; 4151 4152 for (i = 0; i < rx_received; i++) { 4153 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 4154 u32 bm = mvpp2_bm_cookie_build(port, rx_desc); 4155 4156 mvpp2_pool_refill(port, bm, 4157 mvpp2_rxdesc_dma_addr_get(port, rx_desc), 4158 mvpp2_rxdesc_cookie_get(port, rx_desc)); 4159 } 4160 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); 4161 } 4162 4163 /* Cleanup Rx queue */ 4164 static void mvpp2_rxq_deinit(struct mvpp2_port *port, 4165 struct mvpp2_rx_queue *rxq) 4166 { 4167 mvpp2_rxq_drop_pkts(port, rxq); 4168 4169 rxq->descs = NULL; 4170 rxq->last_desc = 0; 4171 rxq->next_desc_to_proc = 0; 4172 rxq->descs_dma = 0; 4173 4174 /* Clear Rx descriptors queue starting address and size; 4175 * free descriptor number 4176 */ 4177 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 4178 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); 4179 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0); 4180 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0); 4181 } 4182 4183 /* Create and initialize a Tx queue */ 4184 static int mvpp2_txq_init(struct mvpp2_port *port, 4185 struct mvpp2_tx_queue *txq) 4186 { 4187 u32 val; 4188 int cpu, desc, desc_per_txq, tx_port_num; 4189 struct mvpp2_txq_pcpu *txq_pcpu; 4190 4191 txq->size = port->tx_ring_size; 4192 4193 /* Allocate memory for Tx descriptors */ 4194 txq->descs = buffer_loc.tx_descs; 4195 txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs; 4196 if (!txq->descs) 4197 return -ENOMEM; 4198 4199 /* Make sure 
descriptor address is cache line size aligned */ 4200 BUG_ON(txq->descs != 4201 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); 4202 4203 txq->last_desc = txq->size - 1; 4204 4205 /* Set Tx descriptors queue starting address - indirect access */ 4206 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 4207 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma); 4208 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size & 4209 MVPP2_TXQ_DESC_SIZE_MASK); 4210 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0); 4211 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG, 4212 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); 4213 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG); 4214 val &= ~MVPP2_TXQ_PENDING_MASK; 4215 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val); 4216 4217 /* Calculate base address in prefetch buffer. We reserve 16 descriptors 4218 * for each existing TXQ. 4219 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT 4220 * GBE ports assumed to be continious from 0 to MVPP2_MAX_PORTS 4221 */ 4222 desc_per_txq = 16; 4223 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + 4224 (txq->log_id * desc_per_txq); 4225 4226 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, 4227 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | 4228 MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); 4229 4230 /* WRR / EJP configuration - indirect access */ 4231 tx_port_num = mvpp2_egress_port(port); 4232 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 4233 4234 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); 4235 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; 4236 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); 4237 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; 4238 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); 4239 4240 val = MVPP2_TXQ_TOKEN_SIZE_MAX; 4241 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), 4242 val); 4243 4244 for_each_present_cpu(cpu) { 4245 txq_pcpu = per_cpu_ptr(txq->pcpu, 
cpu); 4246 txq_pcpu->size = txq->size; 4247 } 4248 4249 return 0; 4250 } 4251 4252 /* Free allocated TXQ resources */ 4253 static void mvpp2_txq_deinit(struct mvpp2_port *port, 4254 struct mvpp2_tx_queue *txq) 4255 { 4256 txq->descs = NULL; 4257 txq->last_desc = 0; 4258 txq->next_desc_to_proc = 0; 4259 txq->descs_dma = 0; 4260 4261 /* Set minimum bandwidth for disabled TXQs */ 4262 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); 4263 4264 /* Set Tx descriptors queue starting address and size */ 4265 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 4266 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0); 4267 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0); 4268 } 4269 4270 /* Cleanup Tx ports */ 4271 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) 4272 { 4273 struct mvpp2_txq_pcpu *txq_pcpu; 4274 int delay, pending, cpu; 4275 u32 val; 4276 4277 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 4278 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG); 4279 val |= MVPP2_TXQ_DRAIN_EN_MASK; 4280 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); 4281 4282 /* The napi queue has been stopped so wait for all packets 4283 * to be transmitted. 
4284 */ 4285 delay = 0; 4286 do { 4287 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { 4288 netdev_warn(port->dev, 4289 "port %d: cleaning queue %d timed out\n", 4290 port->id, txq->log_id); 4291 break; 4292 } 4293 mdelay(1); 4294 delay++; 4295 4296 pending = mvpp2_txq_pend_desc_num_get(port, txq); 4297 } while (pending); 4298 4299 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 4300 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); 4301 4302 for_each_present_cpu(cpu) { 4303 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 4304 4305 /* Release all packets */ 4306 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); 4307 4308 /* Reset queue */ 4309 txq_pcpu->count = 0; 4310 txq_pcpu->txq_put_index = 0; 4311 txq_pcpu->txq_get_index = 0; 4312 } 4313 } 4314 4315 /* Cleanup all Tx queues */ 4316 static void mvpp2_cleanup_txqs(struct mvpp2_port *port) 4317 { 4318 struct mvpp2_tx_queue *txq; 4319 int queue; 4320 u32 val; 4321 4322 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); 4323 4324 /* Reset Tx ports and delete Tx queues */ 4325 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); 4326 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 4327 4328 for (queue = 0; queue < txq_number; queue++) { 4329 txq = port->txqs[queue]; 4330 mvpp2_txq_clean(port, txq); 4331 mvpp2_txq_deinit(port, txq); 4332 } 4333 4334 mvpp2_txq_sent_counter_clear(port); 4335 4336 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); 4337 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 4338 } 4339 4340 /* Cleanup all Rx queues */ 4341 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) 4342 { 4343 int queue; 4344 4345 for (queue = 0; queue < rxq_number; queue++) 4346 mvpp2_rxq_deinit(port, port->rxqs[queue]); 4347 } 4348 4349 /* Init all Rx queues for port */ 4350 static int mvpp2_setup_rxqs(struct mvpp2_port *port) 4351 { 4352 int queue, err; 4353 4354 for (queue = 0; queue < rxq_number; queue++) { 4355 err = mvpp2_rxq_init(port, port->rxqs[queue]); 4356 if (err) 4357 goto err_cleanup; 4358 } 4359 return 0; 4360 
4361 err_cleanup: 4362 mvpp2_cleanup_rxqs(port); 4363 return err; 4364 } 4365 4366 /* Init all tx queues for port */ 4367 static int mvpp2_setup_txqs(struct mvpp2_port *port) 4368 { 4369 struct mvpp2_tx_queue *txq; 4370 int queue, err; 4371 4372 for (queue = 0; queue < txq_number; queue++) { 4373 txq = port->txqs[queue]; 4374 err = mvpp2_txq_init(port, txq); 4375 if (err) 4376 goto err_cleanup; 4377 } 4378 4379 mvpp2_txq_sent_counter_clear(port); 4380 return 0; 4381 4382 err_cleanup: 4383 mvpp2_cleanup_txqs(port); 4384 return err; 4385 } 4386 4387 /* Adjust link */ 4388 static void mvpp2_link_event(struct mvpp2_port *port) 4389 { 4390 struct phy_device *phydev = port->phy_dev; 4391 int status_change = 0; 4392 u32 val; 4393 4394 if (phydev->link) { 4395 if ((port->speed != phydev->speed) || 4396 (port->duplex != phydev->duplex)) { 4397 u32 val; 4398 4399 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4400 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | 4401 MVPP2_GMAC_CONFIG_GMII_SPEED | 4402 MVPP2_GMAC_CONFIG_FULL_DUPLEX | 4403 MVPP2_GMAC_AN_SPEED_EN | 4404 MVPP2_GMAC_AN_DUPLEX_EN); 4405 4406 if (phydev->duplex) 4407 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; 4408 4409 if (phydev->speed == SPEED_1000) 4410 val |= MVPP2_GMAC_CONFIG_GMII_SPEED; 4411 else if (phydev->speed == SPEED_100) 4412 val |= MVPP2_GMAC_CONFIG_MII_SPEED; 4413 4414 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4415 4416 port->duplex = phydev->duplex; 4417 port->speed = phydev->speed; 4418 } 4419 } 4420 4421 if (phydev->link != port->link) { 4422 if (!phydev->link) { 4423 port->duplex = -1; 4424 port->speed = 0; 4425 } 4426 4427 port->link = phydev->link; 4428 status_change = 1; 4429 } 4430 4431 if (status_change) { 4432 if (phydev->link) { 4433 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4434 val |= (MVPP2_GMAC_FORCE_LINK_PASS | 4435 MVPP2_GMAC_FORCE_LINK_DOWN); 4436 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 4437 mvpp2_egress_enable(port); 4438 mvpp2_ingress_enable(port); 4439 
} else { 4440 mvpp2_ingress_disable(port); 4441 mvpp2_egress_disable(port); 4442 } 4443 } 4444 } 4445 4446 /* Main RX/TX processing routines */ 4447 4448 /* Display more error info */ 4449 static void mvpp2_rx_error(struct mvpp2_port *port, 4450 struct mvpp2_rx_desc *rx_desc) 4451 { 4452 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 4453 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); 4454 4455 switch (status & MVPP2_RXD_ERR_CODE_MASK) { 4456 case MVPP2_RXD_ERR_CRC: 4457 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n", 4458 status, sz); 4459 break; 4460 case MVPP2_RXD_ERR_OVERRUN: 4461 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n", 4462 status, sz); 4463 break; 4464 case MVPP2_RXD_ERR_RESOURCE: 4465 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n", 4466 status, sz); 4467 break; 4468 } 4469 } 4470 4471 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ 4472 static int mvpp2_rx_refill(struct mvpp2_port *port, 4473 struct mvpp2_bm_pool *bm_pool, 4474 u32 bm, dma_addr_t dma_addr) 4475 { 4476 mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr); 4477 return 0; 4478 } 4479 4480 /* Set hw internals when starting port */ 4481 static void mvpp2_start_dev(struct mvpp2_port *port) 4482 { 4483 switch (port->phy_interface) { 4484 case PHY_INTERFACE_MODE_RGMII: 4485 case PHY_INTERFACE_MODE_RGMII_ID: 4486 case PHY_INTERFACE_MODE_SGMII: 4487 mvpp2_gmac_max_rx_size_set(port); 4488 default: 4489 break; 4490 } 4491 4492 mvpp2_txp_max_tx_size_set(port); 4493 4494 if (port->priv->hw_version == MVPP21) 4495 mvpp2_port_enable(port); 4496 else 4497 gop_port_enable(port, 1); 4498 } 4499 4500 /* Set hw internals when stopping port */ 4501 static void mvpp2_stop_dev(struct mvpp2_port *port) 4502 { 4503 /* Stop new packets from arriving to RXQs */ 4504 mvpp2_ingress_disable(port); 4505 4506 mvpp2_egress_disable(port); 4507 4508 if (port->priv->hw_version == MVPP21) 4509 
mvpp2_port_disable(port); 4510 else 4511 gop_port_enable(port, 0); 4512 } 4513 4514 static int mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port) 4515 { 4516 struct phy_device *phy_dev; 4517 4518 if (!port->init || port->link == 0) { 4519 phy_dev = phy_connect(port->priv->bus, port->phyaddr, dev, 4520 port->phy_interface); 4521 port->phy_dev = phy_dev; 4522 if (!phy_dev) { 4523 netdev_err(port->dev, "cannot connect to phy\n"); 4524 return -ENODEV; 4525 } 4526 phy_dev->supported &= PHY_GBIT_FEATURES; 4527 phy_dev->advertising = phy_dev->supported; 4528 4529 port->phy_dev = phy_dev; 4530 port->link = 0; 4531 port->duplex = 0; 4532 port->speed = 0; 4533 4534 phy_config(phy_dev); 4535 phy_startup(phy_dev); 4536 if (!phy_dev->link) { 4537 printf("%s: No link\n", phy_dev->dev->name); 4538 return -1; 4539 } 4540 4541 port->init = 1; 4542 } else { 4543 mvpp2_egress_enable(port); 4544 mvpp2_ingress_enable(port); 4545 } 4546 4547 return 0; 4548 } 4549 4550 static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port) 4551 { 4552 unsigned char mac_bcast[ETH_ALEN] = { 4553 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 4554 int err; 4555 4556 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true); 4557 if (err) { 4558 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); 4559 return err; 4560 } 4561 err = mvpp2_prs_mac_da_accept(port->priv, port->id, 4562 port->dev_addr, true); 4563 if (err) { 4564 netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n"); 4565 return err; 4566 } 4567 err = mvpp2_prs_def_flow(port); 4568 if (err) { 4569 netdev_err(dev, "mvpp2_prs_def_flow failed\n"); 4570 return err; 4571 } 4572 4573 /* Allocate the Rx/Tx queues */ 4574 err = mvpp2_setup_rxqs(port); 4575 if (err) { 4576 netdev_err(port->dev, "cannot allocate Rx queues\n"); 4577 return err; 4578 } 4579 4580 err = mvpp2_setup_txqs(port); 4581 if (err) { 4582 netdev_err(port->dev, "cannot allocate Tx queues\n"); 4583 return err; 4584 } 4585 4586 if (port->phy_node) { 4587 
err = mvpp2_phy_connect(dev, port); 4588 if (err < 0) 4589 return err; 4590 4591 mvpp2_link_event(port); 4592 } else { 4593 mvpp2_egress_enable(port); 4594 mvpp2_ingress_enable(port); 4595 } 4596 4597 mvpp2_start_dev(port); 4598 4599 return 0; 4600 } 4601 4602 /* No Device ops here in U-Boot */ 4603 4604 /* Driver initialization */ 4605 4606 static void mvpp2_port_power_up(struct mvpp2_port *port) 4607 { 4608 struct mvpp2 *priv = port->priv; 4609 4610 /* On PPv2.2 the GoP / interface configuration has already been done */ 4611 if (priv->hw_version == MVPP21) 4612 mvpp2_port_mii_set(port); 4613 mvpp2_port_periodic_xon_disable(port); 4614 if (priv->hw_version == MVPP21) 4615 mvpp2_port_fc_adv_enable(port); 4616 mvpp2_port_reset(port); 4617 } 4618 4619 /* Initialize port HW */ 4620 static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port) 4621 { 4622 struct mvpp2 *priv = port->priv; 4623 struct mvpp2_txq_pcpu *txq_pcpu; 4624 int queue, cpu, err; 4625 4626 if (port->first_rxq + rxq_number > 4627 MVPP2_MAX_PORTS * priv->max_port_rxqs) 4628 return -EINVAL; 4629 4630 /* Disable port */ 4631 mvpp2_egress_disable(port); 4632 if (priv->hw_version == MVPP21) 4633 mvpp2_port_disable(port); 4634 else 4635 gop_port_enable(port, 0); 4636 4637 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs), 4638 GFP_KERNEL); 4639 if (!port->txqs) 4640 return -ENOMEM; 4641 4642 /* Associate physical Tx queues to this port and initialize. 4643 * The mapping is predefined. 
4644 */ 4645 for (queue = 0; queue < txq_number; queue++) { 4646 int queue_phy_id = mvpp2_txq_phys(port->id, queue); 4647 struct mvpp2_tx_queue *txq; 4648 4649 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); 4650 if (!txq) 4651 return -ENOMEM; 4652 4653 txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu), 4654 GFP_KERNEL); 4655 if (!txq->pcpu) 4656 return -ENOMEM; 4657 4658 txq->id = queue_phy_id; 4659 txq->log_id = queue; 4660 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; 4661 for_each_present_cpu(cpu) { 4662 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 4663 txq_pcpu->cpu = cpu; 4664 } 4665 4666 port->txqs[queue] = txq; 4667 } 4668 4669 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs), 4670 GFP_KERNEL); 4671 if (!port->rxqs) 4672 return -ENOMEM; 4673 4674 /* Allocate and initialize Rx queue for this port */ 4675 for (queue = 0; queue < rxq_number; queue++) { 4676 struct mvpp2_rx_queue *rxq; 4677 4678 /* Map physical Rx queue to port's logical Rx queue */ 4679 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); 4680 if (!rxq) 4681 return -ENOMEM; 4682 /* Map this Rx queue to a physical queue */ 4683 rxq->id = port->first_rxq + queue; 4684 rxq->port = port->id; 4685 rxq->logic_rxq = queue; 4686 4687 port->rxqs[queue] = rxq; 4688 } 4689 4690 4691 /* Create Rx descriptor rings */ 4692 for (queue = 0; queue < rxq_number; queue++) { 4693 struct mvpp2_rx_queue *rxq = port->rxqs[queue]; 4694 4695 rxq->size = port->rx_ring_size; 4696 rxq->pkts_coal = MVPP2_RX_COAL_PKTS; 4697 rxq->time_coal = MVPP2_RX_COAL_USEC; 4698 } 4699 4700 mvpp2_ingress_disable(port); 4701 4702 /* Port default configuration */ 4703 mvpp2_defaults_set(port); 4704 4705 /* Port's classifier configuration */ 4706 mvpp2_cls_oversize_rxq_set(port); 4707 mvpp2_cls_port_config(port); 4708 4709 /* Provide an initial Rx packet size */ 4710 port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN); 4711 4712 /* Initialize pools for swf */ 4713 err = mvpp2_swf_bm_pool_init(port); 4714 if (err) 
4715 return err; 4716 4717 return 0; 4718 } 4719 4720 static int phy_info_parse(struct udevice *dev, struct mvpp2_port *port) 4721 { 4722 int port_node = dev_of_offset(dev); 4723 const char *phy_mode_str; 4724 int phy_node, mdio_off, cp_node; 4725 u32 id; 4726 u32 phyaddr = 0; 4727 int phy_mode = -1; 4728 u64 mdio_addr; 4729 4730 phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy"); 4731 4732 if (phy_node > 0) { 4733 phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0); 4734 if (phyaddr < 0) { 4735 dev_err(&pdev->dev, "could not find phy address\n"); 4736 return -1; 4737 } 4738 mdio_off = fdt_parent_offset(gd->fdt_blob, phy_node); 4739 4740 /* TODO: This WA for mdio issue. U-boot 2017 don't have 4741 * mdio driver and on MACHIATOBin board ports from CP1 4742 * connected to mdio on CP0. 4743 * WA is to get mdio address from phy handler parent 4744 * base address. WA should be removed after 4745 * mdio driver implementation. 4746 */ 4747 mdio_addr = fdtdec_get_uint(gd->fdt_blob, 4748 mdio_off, "reg", 0); 4749 4750 cp_node = fdt_parent_offset(gd->fdt_blob, mdio_off); 4751 mdio_addr |= fdt_get_base_address((void *)gd->fdt_blob, 4752 cp_node); 4753 4754 port->priv->mdio_base = (void *)mdio_addr; 4755 4756 if (port->priv->mdio_base < 0) { 4757 dev_err(&pdev->dev, "could not find mdio base address\n"); 4758 return -1; 4759 } 4760 } else { 4761 phy_node = 0; 4762 } 4763 4764 phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL); 4765 if (phy_mode_str) 4766 phy_mode = phy_get_interface_by_name(phy_mode_str); 4767 if (phy_mode == -1) { 4768 dev_err(&pdev->dev, "incorrect phy mode\n"); 4769 return -EINVAL; 4770 } 4771 4772 id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1); 4773 if (id == -1) { 4774 dev_err(&pdev->dev, "missing port-id value\n"); 4775 return -EINVAL; 4776 } 4777 4778 #ifdef CONFIG_DM_GPIO 4779 gpio_request_by_name(dev, "phy-reset-gpios", 0, 4780 &port->phy_reset_gpio, GPIOD_IS_OUT); 4781 gpio_request_by_name(dev, 
"marvell,sfp-tx-disable-gpio", 0, 4782 &port->phy_tx_disable_gpio, GPIOD_IS_OUT); 4783 #endif 4784 4785 /* 4786 * ToDo: 4787 * Not sure if this DT property "phy-speed" will get accepted, so 4788 * this might change later 4789 */ 4790 /* Get phy-speed for SGMII 2.5Gbps vs 1Gbps setup */ 4791 port->phy_speed = fdtdec_get_int(gd->fdt_blob, port_node, 4792 "phy-speed", 1000); 4793 4794 port->id = id; 4795 if (port->priv->hw_version == MVPP21) 4796 port->first_rxq = port->id * rxq_number; 4797 else 4798 port->first_rxq = port->id * port->priv->max_port_rxqs; 4799 port->phy_node = phy_node; 4800 port->phy_interface = phy_mode; 4801 port->phyaddr = phyaddr; 4802 4803 return 0; 4804 } 4805 4806 #ifdef CONFIG_DM_GPIO 4807 /* Port GPIO initialization */ 4808 static void mvpp2_gpio_init(struct mvpp2_port *port) 4809 { 4810 if (dm_gpio_is_valid(&port->phy_reset_gpio)) { 4811 dm_gpio_set_value(&port->phy_reset_gpio, 0); 4812 udelay(1000); 4813 dm_gpio_set_value(&port->phy_reset_gpio, 1); 4814 } 4815 4816 if (dm_gpio_is_valid(&port->phy_tx_disable_gpio)) 4817 dm_gpio_set_value(&port->phy_tx_disable_gpio, 0); 4818 } 4819 #endif 4820 4821 /* Ports initialization */ 4822 static int mvpp2_port_probe(struct udevice *dev, 4823 struct mvpp2_port *port, 4824 int port_node, 4825 struct mvpp2 *priv) 4826 { 4827 int err; 4828 4829 port->tx_ring_size = MVPP2_MAX_TXD; 4830 port->rx_ring_size = MVPP2_MAX_RXD; 4831 4832 err = mvpp2_port_init(dev, port); 4833 if (err < 0) { 4834 dev_err(&pdev->dev, "failed to init port %d\n", port->id); 4835 return err; 4836 } 4837 mvpp2_port_power_up(port); 4838 4839 #ifdef CONFIG_DM_GPIO 4840 mvpp2_gpio_init(port); 4841 #endif 4842 4843 priv->port_list[port->id] = port; 4844 priv->num_ports++; 4845 return 0; 4846 } 4847 4848 /* Initialize decoding windows */ 4849 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, 4850 struct mvpp2 *priv) 4851 { 4852 u32 win_enable; 4853 int i; 4854 4855 for (i = 0; i < 6; i++) { 4856 
mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); 4857 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); 4858 4859 if (i < 4) 4860 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); 4861 } 4862 4863 win_enable = 0; 4864 4865 for (i = 0; i < dram->num_cs; i++) { 4866 const struct mbus_dram_window *cs = dram->cs + i; 4867 4868 mvpp2_write(priv, MVPP2_WIN_BASE(i), 4869 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | 4870 dram->mbus_dram_target_id); 4871 4872 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 4873 (cs->size - 1) & 0xffff0000); 4874 4875 win_enable |= (1 << i); 4876 } 4877 4878 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); 4879 } 4880 4881 /* Initialize Rx FIFO's */ 4882 static void mvpp2_rx_fifo_init(struct mvpp2 *priv) 4883 { 4884 int port; 4885 4886 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 4887 if (priv->hw_version == MVPP22) { 4888 if (port == 0) { 4889 mvpp2_write(priv, 4890 MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4891 MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE); 4892 mvpp2_write(priv, 4893 MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4894 MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE); 4895 } else if (port == 1) { 4896 mvpp2_write(priv, 4897 MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4898 MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE); 4899 mvpp2_write(priv, 4900 MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4901 MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE); 4902 } else { 4903 mvpp2_write(priv, 4904 MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4905 MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE); 4906 mvpp2_write(priv, 4907 MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4908 MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE); 4909 } 4910 } else { 4911 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), 4912 MVPP21_RX_FIFO_PORT_DATA_SIZE); 4913 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 4914 MVPP21_RX_FIFO_PORT_ATTR_SIZE); 4915 } 4916 } 4917 4918 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, 4919 MVPP2_RX_FIFO_PORT_MIN_PKT); 4920 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); 4921 } 4922 4923 /* Initialize Tx FIFO's */ 4924 static void mvpp2_tx_fifo_init(struct mvpp2 *priv) 4925 
{ 4926 int port, val; 4927 4928 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 4929 /* Port 0 supports 10KB TX FIFO */ 4930 if (port == 0) { 4931 val = MVPP2_TX_FIFO_DATA_SIZE_10KB & 4932 MVPP22_TX_FIFO_SIZE_MASK; 4933 } else { 4934 val = MVPP2_TX_FIFO_DATA_SIZE_3KB & 4935 MVPP22_TX_FIFO_SIZE_MASK; 4936 } 4937 mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), val); 4938 } 4939 } 4940 4941 static void mvpp2_axi_init(struct mvpp2 *priv) 4942 { 4943 u32 val, rdval, wrval; 4944 4945 mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0); 4946 4947 /* AXI Bridge Configuration */ 4948 4949 rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE 4950 << MVPP22_AXI_ATTR_CACHE_OFFS; 4951 rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4952 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 4953 4954 wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE 4955 << MVPP22_AXI_ATTR_CACHE_OFFS; 4956 wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4957 << MVPP22_AXI_ATTR_DOMAIN_OFFS; 4958 4959 /* BM */ 4960 mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval); 4961 mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval); 4962 4963 /* Descriptors */ 4964 mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval); 4965 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval); 4966 mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval); 4967 mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval); 4968 4969 /* Buffer Data */ 4970 mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval); 4971 mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval); 4972 4973 val = MVPP22_AXI_CODE_CACHE_NON_CACHE 4974 << MVPP22_AXI_CODE_CACHE_OFFS; 4975 val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM 4976 << MVPP22_AXI_CODE_DOMAIN_OFFS; 4977 mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val); 4978 mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val); 4979 4980 val = MVPP22_AXI_CODE_CACHE_RD_CACHE 4981 << MVPP22_AXI_CODE_CACHE_OFFS; 4982 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4983 << MVPP22_AXI_CODE_DOMAIN_OFFS; 4984 4985 mvpp2_write(priv, 
MVPP22_AXI_RD_SNOOP_CODE_REG, val); 4986 4987 val = MVPP22_AXI_CODE_CACHE_WR_CACHE 4988 << MVPP22_AXI_CODE_CACHE_OFFS; 4989 val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM 4990 << MVPP22_AXI_CODE_DOMAIN_OFFS; 4991 4992 mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val); 4993 } 4994 4995 /* Initialize network controller common part HW */ 4996 static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv) 4997 { 4998 const struct mbus_dram_target_info *dram_target_info; 4999 int err, i; 5000 u32 val; 5001 5002 /* Checks for hardware constraints (U-Boot uses only one rxq) */ 5003 if ((rxq_number > priv->max_port_rxqs) || 5004 (txq_number > MVPP2_MAX_TXQ)) { 5005 dev_err(&pdev->dev, "invalid queue size parameter\n"); 5006 return -EINVAL; 5007 } 5008 5009 if (priv->hw_version == MVPP22) 5010 mvpp2_axi_init(priv); 5011 else { 5012 /* MBUS windows configuration */ 5013 dram_target_info = mvebu_mbus_dram_info(); 5014 if (dram_target_info) 5015 mvpp2_conf_mbus_windows(dram_target_info, priv); 5016 } 5017 5018 if (priv->hw_version == MVPP21) { 5019 /* Disable HW PHY polling */ 5020 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); 5021 val |= MVPP2_PHY_AN_STOP_SMI0_MASK; 5022 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); 5023 } else { 5024 /* Enable HW PHY polling */ 5025 val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG); 5026 val |= MVPP22_SMI_POLLING_EN; 5027 writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG); 5028 } 5029 5030 /* Allocate and initialize aggregated TXQs */ 5031 priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(), 5032 sizeof(struct mvpp2_tx_queue), 5033 GFP_KERNEL); 5034 if (!priv->aggr_txqs) 5035 return -ENOMEM; 5036 5037 for_each_present_cpu(i) { 5038 priv->aggr_txqs[i].id = i; 5039 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; 5040 err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i], 5041 MVPP2_AGGR_TXQ_SIZE, i, priv); 5042 if (err < 0) 5043 return err; 5044 } 5045 5046 /* Rx Fifo Init */ 5047 mvpp2_rx_fifo_init(priv); 5048 5049 /* Tx 
Fifo Init */ 5050 if (priv->hw_version == MVPP22) 5051 mvpp2_tx_fifo_init(priv); 5052 5053 if (priv->hw_version == MVPP21) 5054 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, 5055 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); 5056 5057 /* Allow cache snoop when transmiting packets */ 5058 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); 5059 5060 /* Buffer Manager initialization */ 5061 err = mvpp2_bm_init(dev, priv); 5062 if (err < 0) 5063 return err; 5064 5065 /* Parser default initialization */ 5066 err = mvpp2_prs_default_init(dev, priv); 5067 if (err < 0) 5068 return err; 5069 5070 /* Classifier default initialization */ 5071 mvpp2_cls_init(priv); 5072 5073 return 0; 5074 } 5075 5076 /* SMI / MDIO functions */ 5077 5078 static int smi_wait_ready(struct mvpp2 *priv) 5079 { 5080 u32 timeout = MVPP2_SMI_TIMEOUT; 5081 u32 smi_reg; 5082 5083 /* wait till the SMI is not busy */ 5084 do { 5085 /* read smi register */ 5086 smi_reg = readl(priv->mdio_base); 5087 if (timeout-- == 0) { 5088 printf("Error: SMI busy timeout\n"); 5089 return -EFAULT; 5090 } 5091 } while (smi_reg & MVPP2_SMI_BUSY); 5092 5093 return 0; 5094 } 5095 5096 /* 5097 * mpp2_mdio_read - miiphy_read callback function. 
5098 * 5099 * Returns 16bit phy register value, or 0xffff on error 5100 */ 5101 static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg) 5102 { 5103 struct mvpp2 *priv = bus->priv; 5104 u32 smi_reg; 5105 u32 timeout; 5106 5107 /* check parameters */ 5108 if (addr > MVPP2_PHY_ADDR_MASK) { 5109 printf("Error: Invalid PHY address %d\n", addr); 5110 return -EFAULT; 5111 } 5112 5113 if (reg > MVPP2_PHY_REG_MASK) { 5114 printf("Err: Invalid register offset %d\n", reg); 5115 return -EFAULT; 5116 } 5117 5118 /* wait till the SMI is not busy */ 5119 if (smi_wait_ready(priv) < 0) 5120 return -EFAULT; 5121 5122 /* fill the phy address and regiser offset and read opcode */ 5123 smi_reg = (addr << MVPP2_SMI_DEV_ADDR_OFFS) 5124 | (reg << MVPP2_SMI_REG_ADDR_OFFS) 5125 | MVPP2_SMI_OPCODE_READ; 5126 5127 /* write the smi register */ 5128 writel(smi_reg, priv->mdio_base); 5129 5130 /* wait till read value is ready */ 5131 timeout = MVPP2_SMI_TIMEOUT; 5132 5133 do { 5134 /* read smi register */ 5135 smi_reg = readl(priv->mdio_base); 5136 if (timeout-- == 0) { 5137 printf("Err: SMI read ready timeout\n"); 5138 return -EFAULT; 5139 } 5140 } while (!(smi_reg & MVPP2_SMI_READ_VALID)); 5141 5142 /* Wait for the data to update in the SMI register */ 5143 for (timeout = 0; timeout < MVPP2_SMI_TIMEOUT; timeout++) 5144 ; 5145 5146 return readl(priv->mdio_base) & MVPP2_SMI_DATA_MASK; 5147 } 5148 5149 /* 5150 * mpp2_mdio_write - miiphy_write callback function. 
5151 * 5152 * Returns 0 if write succeed, -EINVAL on bad parameters 5153 * -ETIME on timeout 5154 */ 5155 static int mpp2_mdio_write(struct mii_dev *bus, int addr, int devad, int reg, 5156 u16 value) 5157 { 5158 struct mvpp2 *priv = bus->priv; 5159 u32 smi_reg; 5160 5161 /* check parameters */ 5162 if (addr > MVPP2_PHY_ADDR_MASK) { 5163 printf("Error: Invalid PHY address %d\n", addr); 5164 return -EFAULT; 5165 } 5166 5167 if (reg > MVPP2_PHY_REG_MASK) { 5168 printf("Err: Invalid register offset %d\n", reg); 5169 return -EFAULT; 5170 } 5171 5172 /* wait till the SMI is not busy */ 5173 if (smi_wait_ready(priv) < 0) 5174 return -EFAULT; 5175 5176 /* fill the phy addr and reg offset and write opcode and data */ 5177 smi_reg = value << MVPP2_SMI_DATA_OFFS; 5178 smi_reg |= (addr << MVPP2_SMI_DEV_ADDR_OFFS) 5179 | (reg << MVPP2_SMI_REG_ADDR_OFFS); 5180 smi_reg &= ~MVPP2_SMI_OPCODE_READ; 5181 5182 /* write the smi register */ 5183 writel(smi_reg, priv->mdio_base); 5184 5185 return 0; 5186 } 5187 5188 static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp) 5189 { 5190 struct mvpp2_port *port = dev_get_priv(dev); 5191 struct mvpp2_rx_desc *rx_desc; 5192 struct mvpp2_bm_pool *bm_pool; 5193 dma_addr_t dma_addr; 5194 u32 bm, rx_status; 5195 int pool, rx_bytes, err; 5196 int rx_received; 5197 struct mvpp2_rx_queue *rxq; 5198 u8 *data; 5199 5200 /* Process RX packets */ 5201 rxq = port->rxqs[0]; 5202 5203 /* Get number of received packets and clamp the to-do */ 5204 rx_received = mvpp2_rxq_received(port, rxq->id); 5205 5206 /* Return if no packets are received */ 5207 if (!rx_received) 5208 return 0; 5209 5210 rx_desc = mvpp2_rxq_next_desc_get(rxq); 5211 rx_status = mvpp2_rxdesc_status_get(port, rx_desc); 5212 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); 5213 rx_bytes -= MVPP2_MH_SIZE; 5214 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); 5215 5216 bm = mvpp2_bm_cookie_build(port, rx_desc); 5217 pool = mvpp2_bm_cookie_pool_get(bm); 5218 bm_pool = 
&port->priv->bm_pools[pool]; 5219 5220 /* In case of an error, release the requested buffer pointer 5221 * to the Buffer Manager. This request process is controlled 5222 * by the hardware, and the information about the buffer is 5223 * comprised by the RX descriptor. 5224 */ 5225 if (rx_status & MVPP2_RXD_ERR_SUMMARY) { 5226 mvpp2_rx_error(port, rx_desc); 5227 /* Return the buffer to the pool */ 5228 mvpp2_pool_refill(port, bm, dma_addr, dma_addr); 5229 return 0; 5230 } 5231 5232 err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr); 5233 if (err) { 5234 netdev_err(port->dev, "failed to refill BM pools\n"); 5235 return 0; 5236 } 5237 5238 /* Update Rx queue management counters */ 5239 mb(); 5240 mvpp2_rxq_status_update(port, rxq->id, 1, 1); 5241 5242 /* give packet to stack - skip on first n bytes */ 5243 data = (u8 *)dma_addr + 2 + 32; 5244 5245 if (rx_bytes <= 0) 5246 return 0; 5247 5248 /* 5249 * No cache invalidation needed here, since the rx_buffer's are 5250 * located in a uncached memory region 5251 */ 5252 *packetp = data; 5253 5254 return rx_bytes; 5255 } 5256 5257 static int mvpp2_send(struct udevice *dev, void *packet, int length) 5258 { 5259 struct mvpp2_port *port = dev_get_priv(dev); 5260 struct mvpp2_tx_queue *txq, *aggr_txq; 5261 struct mvpp2_tx_desc *tx_desc; 5262 int tx_done; 5263 int timeout; 5264 5265 txq = port->txqs[0]; 5266 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; 5267 5268 /* Get a descriptor for the first part of the packet */ 5269 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 5270 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 5271 mvpp2_txdesc_size_set(port, tx_desc, length); 5272 mvpp2_txdesc_offset_set(port, tx_desc, 5273 (dma_addr_t)packet & MVPP2_TX_DESC_ALIGN); 5274 mvpp2_txdesc_dma_addr_set(port, tx_desc, 5275 (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN); 5276 /* First and Last descriptor */ 5277 mvpp2_txdesc_cmd_set(port, tx_desc, 5278 MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE 5279 | MVPP2_TXD_F_DESC | 
MVPP2_TXD_L_DESC); 5280 5281 /* Flush tx data */ 5282 flush_dcache_range((unsigned long)packet, 5283 (unsigned long)packet + ALIGN(length, PKTALIGN)); 5284 5285 /* Enable transmit */ 5286 mb(); 5287 mvpp2_aggr_txq_pend_desc_add(port, 1); 5288 5289 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 5290 5291 timeout = 0; 5292 do { 5293 if (timeout++ > 10000) { 5294 printf("timeout: packet not sent from aggregated to phys TXQ\n"); 5295 return 0; 5296 } 5297 tx_done = mvpp2_txq_pend_desc_num_get(port, txq); 5298 } while (tx_done); 5299 5300 timeout = 0; 5301 do { 5302 if (timeout++ > 10000) { 5303 printf("timeout: packet not sent\n"); 5304 return 0; 5305 } 5306 tx_done = mvpp2_txq_sent_desc_proc(port, txq); 5307 } while (!tx_done); 5308 5309 return 0; 5310 } 5311 5312 static int mvpp2_start(struct udevice *dev) 5313 { 5314 struct eth_pdata *pdata = dev_get_platdata(dev); 5315 struct mvpp2_port *port = dev_get_priv(dev); 5316 5317 /* Load current MAC address */ 5318 memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN); 5319 5320 /* Reconfigure parser accept the original MAC address */ 5321 mvpp2_prs_update_mac_da(port, port->dev_addr); 5322 5323 switch (port->phy_interface) { 5324 case PHY_INTERFACE_MODE_RGMII: 5325 case PHY_INTERFACE_MODE_RGMII_ID: 5326 case PHY_INTERFACE_MODE_SGMII: 5327 mvpp2_port_power_up(port); 5328 default: 5329 break; 5330 } 5331 5332 mvpp2_open(dev, port); 5333 5334 return 0; 5335 } 5336 5337 static void mvpp2_stop(struct udevice *dev) 5338 { 5339 struct mvpp2_port *port = dev_get_priv(dev); 5340 5341 mvpp2_stop_dev(port); 5342 mvpp2_cleanup_rxqs(port); 5343 mvpp2_cleanup_txqs(port); 5344 } 5345 5346 static int mvpp22_smi_phy_addr_cfg(struct mvpp2_port *port) 5347 { 5348 writel(port->phyaddr, port->priv->iface_base + 5349 MVPP22_SMI_PHY_ADDR_REG(port->gop_id)); 5350 5351 return 0; 5352 } 5353 5354 static int mvpp2_base_probe(struct udevice *dev) 5355 { 5356 struct mvpp2 *priv = dev_get_priv(dev); 5357 struct mii_dev *bus; 5358 void *bd_space; 
5359 u32 size = 0; 5360 int i; 5361 5362 /* Save hw-version */ 5363 priv->hw_version = dev_get_driver_data(dev); 5364 5365 /* 5366 * U-Boot special buffer handling: 5367 * 5368 * Allocate buffer area for descs and rx_buffers. This is only 5369 * done once for all interfaces. As only one interface can 5370 * be active. Make this area DMA-safe by disabling the D-cache 5371 */ 5372 5373 /* Align buffer area for descs and rx_buffers to 1MiB */ 5374 bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE); 5375 mmu_set_region_dcache_behaviour((unsigned long)bd_space, 5376 BD_SPACE, DCACHE_OFF); 5377 5378 buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space; 5379 size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE; 5380 5381 buffer_loc.tx_descs = 5382 (struct mvpp2_tx_desc *)((unsigned long)bd_space + size); 5383 size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE; 5384 5385 buffer_loc.rx_descs = 5386 (struct mvpp2_rx_desc *)((unsigned long)bd_space + size); 5387 size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE; 5388 5389 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { 5390 buffer_loc.bm_pool[i] = 5391 (unsigned long *)((unsigned long)bd_space + size); 5392 if (priv->hw_version == MVPP21) 5393 size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32); 5394 else 5395 size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64); 5396 } 5397 5398 for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) { 5399 buffer_loc.rx_buffer[i] = 5400 (unsigned long *)((unsigned long)bd_space + size); 5401 size += RX_BUFFER_SIZE; 5402 } 5403 5404 /* Clear the complete area so that all descriptors are cleared */ 5405 memset(bd_space, 0, size); 5406 5407 /* Save base addresses for later use */ 5408 priv->base = (void *)devfdt_get_addr_index(dev, 0); 5409 if (IS_ERR(priv->base)) 5410 return PTR_ERR(priv->base); 5411 5412 if (priv->hw_version == MVPP21) { 5413 priv->lms_base = (void *)devfdt_get_addr_index(dev, 1); 5414 if (IS_ERR(priv->lms_base)) 5415 return PTR_ERR(priv->lms_base); 5416 5417 priv->mdio_base = priv->lms_base 
+ MVPP21_SMI; 5418 } else { 5419 priv->iface_base = (void *)devfdt_get_addr_index(dev, 1); 5420 if (IS_ERR(priv->iface_base)) 5421 return PTR_ERR(priv->iface_base); 5422 5423 priv->mdio_base = priv->iface_base + MVPP22_SMI; 5424 5425 /* Store common base addresses for all ports */ 5426 priv->mpcs_base = priv->iface_base + MVPP22_MPCS; 5427 priv->xpcs_base = priv->iface_base + MVPP22_XPCS; 5428 priv->rfu1_base = priv->iface_base + MVPP22_RFU1; 5429 } 5430 5431 if (priv->hw_version == MVPP21) 5432 priv->max_port_rxqs = 8; 5433 else 5434 priv->max_port_rxqs = 32; 5435 5436 /* Finally create and register the MDIO bus driver */ 5437 bus = mdio_alloc(); 5438 if (!bus) { 5439 printf("Failed to allocate MDIO bus\n"); 5440 return -ENOMEM; 5441 } 5442 5443 bus->read = mpp2_mdio_read; 5444 bus->write = mpp2_mdio_write; 5445 snprintf(bus->name, sizeof(bus->name), dev->name); 5446 bus->priv = (void *)priv; 5447 priv->bus = bus; 5448 5449 return mdio_register(bus); 5450 } 5451 5452 static int mvpp2_probe(struct udevice *dev) 5453 { 5454 struct mvpp2_port *port = dev_get_priv(dev); 5455 struct mvpp2 *priv = dev_get_priv(dev->parent); 5456 int err; 5457 5458 /* Only call the probe function for the parent once */ 5459 if (!priv->probe_done) 5460 err = mvpp2_base_probe(dev->parent); 5461 5462 port->priv = dev_get_priv(dev->parent); 5463 5464 err = phy_info_parse(dev, port); 5465 if (err) 5466 return err; 5467 5468 /* 5469 * We need the port specific io base addresses at this stage, since 5470 * gop_port_init() accesses these registers 5471 */ 5472 if (priv->hw_version == MVPP21) { 5473 int priv_common_regs_num = 2; 5474 5475 port->base = (void __iomem *)devfdt_get_addr_index( 5476 dev->parent, priv_common_regs_num + port->id); 5477 if (IS_ERR(port->base)) 5478 return PTR_ERR(port->base); 5479 } else { 5480 port->gop_id = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev), 5481 "gop-port-id", -1); 5482 if (port->id == -1) { 5483 dev_err(&pdev->dev, "missing gop-port-id value\n"); 5484 
return -EINVAL; 5485 } 5486 5487 port->base = priv->iface_base + MVPP22_PORT_BASE + 5488 port->gop_id * MVPP22_PORT_OFFSET; 5489 5490 /* Set phy address of the port */ 5491 if(port->phy_node) 5492 mvpp22_smi_phy_addr_cfg(port); 5493 5494 /* GoP Init */ 5495 gop_port_init(port); 5496 } 5497 5498 if (!priv->probe_done) { 5499 /* Initialize network controller */ 5500 err = mvpp2_init(dev, priv); 5501 if (err < 0) { 5502 dev_err(&pdev->dev, "failed to initialize controller\n"); 5503 return err; 5504 } 5505 priv->num_ports = 0; 5506 priv->probe_done = 1; 5507 } 5508 5509 err = mvpp2_port_probe(dev, port, dev_of_offset(dev), priv); 5510 if (err) 5511 return err; 5512 5513 if (priv->hw_version == MVPP22) { 5514 priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id, 5515 port->phy_interface); 5516 5517 /* Netcomplex configurations for all ports */ 5518 gop_netc_init(priv, MV_NETC_FIRST_PHASE); 5519 gop_netc_init(priv, MV_NETC_SECOND_PHASE); 5520 } 5521 5522 return 0; 5523 } 5524 5525 /* 5526 * Empty BM pool and stop its activity before the OS is started 5527 */ 5528 static int mvpp2_remove(struct udevice *dev) 5529 { 5530 struct mvpp2_port *port = dev_get_priv(dev); 5531 struct mvpp2 *priv = port->priv; 5532 int i; 5533 5534 priv->num_ports--; 5535 5536 if (priv->num_ports) 5537 return 0; 5538 5539 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) 5540 mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]); 5541 5542 return 0; 5543 } 5544 5545 static const struct eth_ops mvpp2_ops = { 5546 .start = mvpp2_start, 5547 .send = mvpp2_send, 5548 .recv = mvpp2_recv, 5549 .stop = mvpp2_stop, 5550 }; 5551 5552 static struct driver mvpp2_driver = { 5553 .name = "mvpp2", 5554 .id = UCLASS_ETH, 5555 .probe = mvpp2_probe, 5556 .remove = mvpp2_remove, 5557 .ops = &mvpp2_ops, 5558 .priv_auto_alloc_size = sizeof(struct mvpp2_port), 5559 .platdata_auto_alloc_size = sizeof(struct eth_pdata), 5560 .flags = DM_FLAG_ACTIVE_DMA, 5561 }; 5562 5563 /* 5564 * Use a MISC device to bind the n instances 
(child nodes) of the 5565 * network base controller in UCLASS_ETH. 5566 */ 5567 static int mvpp2_base_bind(struct udevice *parent) 5568 { 5569 const void *blob = gd->fdt_blob; 5570 int node = dev_of_offset(parent); 5571 struct uclass_driver *drv; 5572 struct udevice *dev; 5573 struct eth_pdata *plat; 5574 char *name; 5575 int subnode; 5576 u32 id; 5577 int base_id_add; 5578 5579 /* Lookup eth driver */ 5580 drv = lists_uclass_lookup(UCLASS_ETH); 5581 if (!drv) { 5582 puts("Cannot find eth driver\n"); 5583 return -ENOENT; 5584 } 5585 5586 base_id_add = base_id; 5587 5588 fdt_for_each_subnode(subnode, blob, node) { 5589 /* Increment base_id for all subnodes, also the disabled ones */ 5590 base_id++; 5591 5592 /* Skip disabled ports */ 5593 if (!fdtdec_get_is_enabled(blob, subnode)) 5594 continue; 5595 5596 plat = calloc(1, sizeof(*plat)); 5597 if (!plat) 5598 return -ENOMEM; 5599 5600 id = fdtdec_get_int(blob, subnode, "port-id", -1); 5601 id += base_id_add; 5602 5603 name = calloc(1, 16); 5604 sprintf(name, "mvpp2-%d", id); 5605 5606 /* Create child device UCLASS_ETH and bind it */ 5607 device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev); 5608 dev_set_of_offset(dev, subnode); 5609 } 5610 5611 return 0; 5612 } 5613 5614 static const struct udevice_id mvpp2_ids[] = { 5615 { 5616 .compatible = "marvell,armada-375-pp2", 5617 .data = MVPP21, 5618 }, 5619 { 5620 .compatible = "marvell,armada-7k-pp22", 5621 .data = MVPP22, 5622 }, 5623 { } 5624 }; 5625 5626 U_BOOT_DRIVER(mvpp2_base) = { 5627 .name = "mvpp2_base", 5628 .id = UCLASS_MISC, 5629 .of_match = mvpp2_ids, 5630 .bind = mvpp2_base_bind, 5631 .priv_auto_alloc_size = sizeof(struct mvpp2), 5632 }; 5633