1 /* 2 * Driver for Marvell PPv2 network controller for Armada 375 SoC. 3 * 4 * Copyright (C) 2014 Marvell 5 * 6 * Marcin Wojtas <mw@semihalf.com> 7 * 8 * U-Boot version: 9 * Copyright (C) 2016 Stefan Roese <sr@denx.de> 10 * 11 * This file is licensed under the terms of the GNU General Public 12 * License version 2. This program is licensed "as is" without any 13 * warranty of any kind, whether express or implied. 14 */ 15 16 #include <common.h> 17 #include <dm.h> 18 #include <dm/device-internal.h> 19 #include <dm/lists.h> 20 #include <net.h> 21 #include <netdev.h> 22 #include <config.h> 23 #include <malloc.h> 24 #include <asm/io.h> 25 #include <linux/errno.h> 26 #include <phy.h> 27 #include <miiphy.h> 28 #include <watchdog.h> 29 #include <asm/arch/cpu.h> 30 #include <asm/arch/soc.h> 31 #include <linux/compat.h> 32 #include <linux/mbus.h> 33 34 DECLARE_GLOBAL_DATA_PTR; 35 36 /* Some linux -> U-Boot compatibility stuff */ 37 #define netdev_err(dev, fmt, args...) \ 38 printf(fmt, ##args) 39 #define netdev_warn(dev, fmt, args...) \ 40 printf(fmt, ##args) 41 #define netdev_info(dev, fmt, args...) \ 42 printf(fmt, ##args) 43 #define netdev_dbg(dev, fmt, args...) \ 44 printf(fmt, ##args) 45 46 #define ETH_ALEN 6 /* Octets in one ethernet addr */ 47 48 #define __verify_pcpu_ptr(ptr) \ 49 do { \ 50 const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \ 51 (void)__vpp_verify; \ 52 } while (0) 53 54 #define VERIFY_PERCPU_PTR(__p) \ 55 ({ \ 56 __verify_pcpu_ptr(__p); \ 57 (typeof(*(__p)) __kernel __force *)(__p); \ 58 }) 59 60 #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); }) 61 #define smp_processor_id() 0 62 #define num_present_cpus() 1 63 #define for_each_present_cpu(cpu) \ 64 for ((cpu) = 0; (cpu) < 1; (cpu)++) 65 66 #define NET_SKB_PAD max(32, MVPP2_CPU_D_CACHE_LINE_SIZE) 67 68 #define CONFIG_NR_CPUS 1 69 #define ETH_HLEN ETHER_HDR_SIZE /* Total octets in header */ 70 71 /* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */ 72 #define WRAP (2 + ETH_HLEN + 4 + 32) 73 #define MTU 1500 74 #define RX_BUFFER_SIZE (ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN)) 75 76 #define MVPP2_SMI_TIMEOUT 10000 77 78 /* RX Fifo Registers */ 79 #define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) 80 #define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port)) 81 #define MVPP2_RX_MIN_PKT_SIZE_REG 0x60 82 #define MVPP2_RX_FIFO_INIT_REG 0x64 83 84 /* RX DMA Top Registers */ 85 #define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port)) 86 #define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16) 87 #define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31) 88 #define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool)) 89 #define MVPP2_POOL_BUF_SIZE_OFFSET 5 90 #define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq)) 91 #define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff 92 #define MVPP2_SNOOP_BUF_HDR_MASK BIT(9) 93 #define MVPP2_RXQ_POOL_SHORT_OFFS 20 94 #define MVPP2_RXQ_POOL_SHORT_MASK 0x700000 95 #define MVPP2_RXQ_POOL_LONG_OFFS 24 96 #define MVPP2_RXQ_POOL_LONG_MASK 0x7000000 97 #define MVPP2_RXQ_PACKET_OFFSET_OFFS 28 98 #define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 99 #define MVPP2_RXQ_DISABLE_MASK BIT(31) 100 101 /* Parser Registers */ 102 #define MVPP2_PRS_INIT_LOOKUP_REG 0x1000 103 #define MVPP2_PRS_PORT_LU_MAX 0xf 104 #define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4)) 105 #define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4)) 106 #define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4)) 107 #define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8)) 108 #define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8)) 109 #define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4)) 110 #define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8)) 111 #define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8)) 112 #define MVPP2_PRS_TCAM_IDX_REG 0x1100 113 #define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4) 114 #define MVPP2_PRS_TCAM_INV_MASK BIT(31) 115 #define MVPP2_PRS_SRAM_IDX_REG 0x1200 116 #define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4) 117 #define MVPP2_PRS_TCAM_CTRL_REG 0x1230 118 #define MVPP2_PRS_TCAM_EN_MASK BIT(0) 119 120 /* Classifier Registers */ 121 #define MVPP2_CLS_MODE_REG 0x1800 122 #define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0) 123 #define MVPP2_CLS_PORT_WAY_REG 0x1810 124 #define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port)) 125 #define MVPP2_CLS_LKP_INDEX_REG 0x1814 126 #define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6 127 #define MVPP2_CLS_LKP_TBL_REG 0x1818 128 #define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff 129 #define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25) 130 #define MVPP2_CLS_FLOW_INDEX_REG 0x1820 131 #define MVPP2_CLS_FLOW_TBL0_REG 0x1824 132 #define MVPP2_CLS_FLOW_TBL1_REG 0x1828 133 #define MVPP2_CLS_FLOW_TBL2_REG 0x182c 134 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4)) 135 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3 136 #define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7 137 #define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4)) 138 #define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0 139 #define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port)) 140 141 /* Descriptor Manager Top Registers */ 142 #define MVPP2_RXQ_NUM_REG 0x2040 143 #define MVPP2_RXQ_DESC_ADDR_REG 0x2044 144 #define MVPP2_RXQ_DESC_SIZE_REG 0x2048 145 #define MVPP2_RXQ_DESC_SIZE_MASK 0x3ff0 146 #define MVPP2_RXQ_STATUS_UPDATE_REG(rxq) (0x3000 + 4 * (rxq)) 147 #define MVPP2_RXQ_NUM_PROCESSED_OFFSET 0 148 #define MVPP2_RXQ_NUM_NEW_OFFSET 16 149 #define MVPP2_RXQ_STATUS_REG(rxq) (0x3400 + 4 * (rxq)) 150 #define MVPP2_RXQ_OCCUPIED_MASK 0x3fff 151 #define MVPP2_RXQ_NON_OCCUPIED_OFFSET 16 152 #define MVPP2_RXQ_NON_OCCUPIED_MASK 0x3fff0000 153 #define MVPP2_RXQ_THRESH_REG 0x204c 154 #define MVPP2_OCCUPIED_THRESH_OFFSET 0 155 #define MVPP2_OCCUPIED_THRESH_MASK 0x3fff 156 #define MVPP2_RXQ_INDEX_REG 0x2050 157 #define MVPP2_TXQ_NUM_REG 0x2080 158 #define MVPP2_TXQ_DESC_ADDR_REG 0x2084 159 #define MVPP2_TXQ_DESC_SIZE_REG 0x2088 160 #define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0 161 #define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090 162 #define MVPP2_TXQ_THRESH_REG 0x2094 163 #define MVPP2_TRANSMITTED_THRESH_OFFSET 16 164 #define MVPP2_TRANSMITTED_THRESH_MASK 0x3fff0000 165 #define MVPP2_TXQ_INDEX_REG 0x2098 166 #define MVPP2_TXQ_PREF_BUF_REG 0x209c 167 #define MVPP2_PREF_BUF_PTR(desc) ((desc) & 0xfff) 168 #define MVPP2_PREF_BUF_SIZE_4 (BIT(12) | BIT(13)) 169 #define MVPP2_PREF_BUF_SIZE_16 (BIT(12) | BIT(14)) 170 #define MVPP2_PREF_BUF_THRESH(val) ((val) << 17) 171 #define MVPP2_TXQ_DRAIN_EN_MASK BIT(31) 172 #define MVPP2_TXQ_PENDING_REG 0x20a0 173 #define MVPP2_TXQ_PENDING_MASK 0x3fff 174 #define MVPP2_TXQ_INT_STATUS_REG 0x20a4 175 #define MVPP2_TXQ_SENT_REG(txq) (0x3c00 + 4 * (txq)) 176 #define MVPP2_TRANSMITTED_COUNT_OFFSET 16 177 #define MVPP2_TRANSMITTED_COUNT_MASK 0x3fff0000 178 #define MVPP2_TXQ_RSVD_REQ_REG 0x20b0 179 #define MVPP2_TXQ_RSVD_REQ_Q_OFFSET 16 180 #define MVPP2_TXQ_RSVD_RSLT_REG 0x20b4 181 #define MVPP2_TXQ_RSVD_RSLT_MASK 0x3fff 182 #define MVPP2_TXQ_RSVD_CLR_REG 0x20b8 183 #define MVPP2_TXQ_RSVD_CLR_OFFSET 16 184 #define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu) (0x2100 + 4 * (cpu)) 185 #define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu) (0x2140 + 4 * (cpu)) 186 #define MVPP2_AGGR_TXQ_DESC_SIZE_MASK 0x3ff0 187 #define MVPP2_AGGR_TXQ_STATUS_REG(cpu) (0x2180 + 4 * (cpu)) 188 #define MVPP2_AGGR_TXQ_PENDING_MASK 0x3fff 189 #define MVPP2_AGGR_TXQ_INDEX_REG(cpu) (0x21c0 + 4 * (cpu)) 190 191 /* MBUS bridge registers */ 192 #define MVPP2_WIN_BASE(w) (0x4000 + ((w) << 2)) 193 #define MVPP2_WIN_SIZE(w) (0x4020 + ((w) << 2)) 194 #define MVPP2_WIN_REMAP(w) (0x4040 + ((w) << 2)) 195 #define MVPP2_BASE_ADDR_ENABLE 0x4060 196 197 /* Interrupt Cause and Mask registers */ 198 #define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) 199 #define MVPP2_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq)) 200 #define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) 201 #define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) 202 #define MVPP2_ISR_DISABLE_INTERRUPT(mask) (((mask) << 16) & 0xffff0000) 203 #define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) 204 #define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff 205 #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 206 #define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) 207 #define MVPP2_CAUSE_FCS_ERR_MASK BIT(25) 208 #define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26) 209 #define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK BIT(29) 210 #define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK BIT(30) 211 #define MVPP2_CAUSE_MISC_SUM_MASK BIT(31) 212 #define MVPP2_ISR_RX_TX_MASK_REG(port) (0x54a0 + 4 * (port)) 213 #define MVPP2_ISR_PON_RX_TX_MASK_REG 0x54bc 214 #define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff 215 #define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000 216 #define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31) 217 #define MVPP2_ISR_MISC_CAUSE_REG 0x55b0 218 219 /* Buffer Manager registers */ 220 #define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4)) 221 #define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80 222 #define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4)) 223 #define MVPP2_BM_POOL_SIZE_MASK 0xfff0 224 #define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4)) 225 #define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0 226 #define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4)) 227 #define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0 228 #define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4)) 229 #define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4)) 230 #define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff 231 #define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16) 232 #define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4)) 233 #define MVPP2_BM_START_MASK BIT(0) 234 #define MVPP2_BM_STOP_MASK BIT(1) 235 #define MVPP2_BM_STATE_MASK BIT(4) 236 #define MVPP2_BM_LOW_THRESH_OFFS 8 237 #define MVPP2_BM_LOW_THRESH_MASK 0x7f00 238 #define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \ 239 MVPP2_BM_LOW_THRESH_OFFS) 240 #define MVPP2_BM_HIGH_THRESH_OFFS 16 241 #define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000 242 #define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \ 243 MVPP2_BM_HIGH_THRESH_OFFS) 244 #define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4)) 245 #define MVPP2_BM_RELEASED_DELAY_MASK BIT(0) 246 #define MVPP2_BM_ALLOC_FAILED_MASK BIT(1) 247 #define MVPP2_BM_BPPE_EMPTY_MASK BIT(2) 248 #define MVPP2_BM_BPPE_FULL_MASK BIT(3) 249 #define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4) 250 #define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4)) 251 #define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4)) 252 #define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0) 253 #define MVPP2_BM_VIRT_ALLOC_REG 0x6440 254 #define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4)) 255 #define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0) 256 #define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1) 257 #define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2) 258 #define MVPP2_BM_VIRT_RLS_REG 0x64c0 259 #define MVPP2_BM_MC_RLS_REG 0x64c4 260 #define MVPP2_BM_MC_ID_MASK 0xfff 261 #define MVPP2_BM_FORCE_RELEASE_MASK BIT(12) 262 263 /* TX Scheduler registers */ 264 #define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 265 #define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004 266 #define MVPP2_TXP_SCHED_ENQ_MASK 0xff 267 #define MVPP2_TXP_SCHED_DISQ_OFFSET 8 268 #define MVPP2_TXP_SCHED_CMD_1_REG 0x8010 269 #define MVPP2_TXP_SCHED_PERIOD_REG 0x8018 270 #define MVPP2_TXP_SCHED_MTU_REG 0x801c 271 #define MVPP2_TXP_MTU_MAX 0x7FFFF 272 #define MVPP2_TXP_SCHED_REFILL_REG 0x8020 273 #define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff 274 #define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000 275 #define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20) 276 #define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024 277 #define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff 278 #define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2)) 279 #define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff 280 #define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000 281 #define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20) 282 #define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2)) 283 #define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff 284 #define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2)) 285 #define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff 286 287 /* TX general registers */ 288 #define MVPP2_TX_SNOOP_REG 0x8800 289 #define MVPP2_TX_PORT_FLUSH_REG 0x8810 290 #define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port)) 291 292 /* LMS registers */ 293 #define MVPP2_SRC_ADDR_MIDDLE 0x24 294 #define MVPP2_SRC_ADDR_HIGH 0x28 295 #define MVPP2_PHY_AN_CFG0_REG 0x34 296 #define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7) 297 #define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c 298 #define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 299 300 /* Per-port registers */ 301 #define MVPP2_GMAC_CTRL_0_REG 0x0 302 #define MVPP2_GMAC_PORT_EN_MASK BIT(0) 303 #define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 304 #define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc 305 #define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) 306 #define MVPP2_GMAC_CTRL_1_REG 0x4 307 #define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1) 308 #define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5) 309 #define MVPP2_GMAC_PCS_LB_EN_BIT 6 310 #define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6) 311 #define MVPP2_GMAC_SA_LOW_OFFS 7 312 #define MVPP2_GMAC_CTRL_2_REG 0x8 313 #define MVPP2_GMAC_INBAND_AN_MASK BIT(0) 314 #define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) 315 #define MVPP2_GMAC_PORT_RGMII_MASK BIT(4) 316 #define MVPP2_GMAC_PORT_RESET_MASK BIT(6) 317 #define MVPP2_GMAC_AUTONEG_CONFIG 0xc 318 #define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) 319 #define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) 320 #define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) 321 #define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) 322 #define MVPP2_GMAC_AN_SPEED_EN BIT(7) 323 #define MVPP2_GMAC_FC_ADV_EN BIT(9) 324 #define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) 325 #define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) 326 #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c 327 #define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 328 #define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 329 #define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ 330 MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) 331 332 #define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff 333 334 /* Descriptor ring Macros */ 335 #define MVPP2_QUEUE_NEXT_DESC(q, index) \ 336 (((index) < (q)->last_desc) ? ((index) + 1) : 0) 337 338 /* SMI: 0xc0054 -> offset 0x54 to lms_base */ 339 #define MVPP2_SMI 0x0054 340 #define MVPP2_PHY_REG_MASK 0x1f 341 /* SMI register fields */ 342 #define MVPP2_SMI_DATA_OFFS 0 /* Data */ 343 #define MVPP2_SMI_DATA_MASK (0xffff << MVPP2_SMI_DATA_OFFS) 344 #define MVPP2_SMI_DEV_ADDR_OFFS 16 /* PHY device address */ 345 #define MVPP2_SMI_REG_ADDR_OFFS 21 /* PHY device reg addr*/ 346 #define MVPP2_SMI_OPCODE_OFFS 26 /* Write/Read opcode */ 347 #define MVPP2_SMI_OPCODE_READ (1 << MVPP2_SMI_OPCODE_OFFS) 348 #define MVPP2_SMI_READ_VALID (1 << 27) /* Read Valid */ 349 #define MVPP2_SMI_BUSY (1 << 28) /* Busy */ 350 351 #define MVPP2_PHY_ADDR_MASK 0x1f 352 #define MVPP2_PHY_REG_MASK 0x1f 353 354 /* Various constants */ 355 356 /* Coalescing */ 357 #define MVPP2_TXDONE_COAL_PKTS_THRESH 15 358 #define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL 359 #define MVPP2_RX_COAL_PKTS 32 360 #define MVPP2_RX_COAL_USEC 100 361 362 /* The two bytes Marvell header. Either contains a special value used 363 * by Marvell switches when a specific hardware mode is enabled (not 364 * supported by this driver) or is filled automatically by zeroes on 365 * the RX side. Those two bytes being at the front of the Ethernet 366 * header, they allow to have the IP header aligned on a 4 bytes 367 * boundary automatically: the hardware skips those two bytes on its 368 * own. 369 */ 370 #define MVPP2_MH_SIZE 2 371 #define MVPP2_ETH_TYPE_LEN 2 372 #define MVPP2_PPPOE_HDR_SIZE 8 373 #define MVPP2_VLAN_TAG_LEN 4 374 375 /* Lbtd 802.3 type */ 376 #define MVPP2_IP_LBDT_TYPE 0xfffa 377 378 #define MVPP2_CPU_D_CACHE_LINE_SIZE 32 379 #define MVPP2_TX_CSUM_MAX_SIZE 9800 380 381 /* Timeout constants */ 382 #define MVPP2_TX_DISABLE_TIMEOUT_MSEC 1000 383 #define MVPP2_TX_PENDING_TIMEOUT_MSEC 1000 384 385 #define MVPP2_TX_MTU_MAX 0x7ffff 386 387 /* Maximum number of T-CONTs of PON port */ 388 #define MVPP2_MAX_TCONT 16 389 390 /* Maximum number of supported ports */ 391 #define MVPP2_MAX_PORTS 4 392 393 /* Maximum number of TXQs used by single port */ 394 #define MVPP2_MAX_TXQ 8 395 396 /* Maximum number of RXQs used by single port */ 397 #define MVPP2_MAX_RXQ 8 398 399 /* Default number of TXQs in use */ 400 #define MVPP2_DEFAULT_TXQ 1 401 402 /* Dfault number of RXQs in use */ 403 #define MVPP2_DEFAULT_RXQ 1 404 #define CONFIG_MV_ETH_RXQ 8 /* increment by 8 */ 405 406 /* Total number of RXQs available to all ports */ 407 #define MVPP2_RXQ_TOTAL_NUM (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ) 408 409 /* Max number of Rx descriptors */ 410 #define MVPP2_MAX_RXD 16 411 412 /* Max number of Tx descriptors */ 413 #define MVPP2_MAX_TXD 16 414 415 /* Amount of Tx descriptors that can be reserved at once by CPU */ 416 #define MVPP2_CPU_DESC_CHUNK 64 417 418 /* Max number of Tx descriptors in each aggregated queue */ 419 #define MVPP2_AGGR_TXQ_SIZE 256 420 421 /* Descriptor aligned size */ 422 #define MVPP2_DESC_ALIGNED_SIZE 32 423 424 /* Descriptor alignment mask */ 425 #define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1) 426 427 /* RX FIFO constants */ 428 #define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000 429 #define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80 430 #define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 431 432 /* RX buffer constants */ 433 #define MVPP2_SKB_SHINFO_SIZE \ 434 0 435 436 #define MVPP2_RX_PKT_SIZE(mtu) \ 437 ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \ 438 ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE) 439 440 #define MVPP2_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) 441 #define MVPP2_RX_TOTAL_SIZE(buf_size) ((buf_size) + MVPP2_SKB_SHINFO_SIZE) 442 #define MVPP2_RX_MAX_PKT_SIZE(total_size) \ 443 ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE) 444 445 #define MVPP2_BIT_TO_BYTE(bit) ((bit) / 8) 446 447 /* IPv6 max L3 address size */ 448 #define MVPP2_MAX_L3_ADDR_SIZE 16 449 450 /* Port flags */ 451 #define MVPP2_F_LOOPBACK BIT(0) 452 453 /* Marvell tag types */ 454 enum mvpp2_tag_type { 455 MVPP2_TAG_TYPE_NONE = 0, 456 MVPP2_TAG_TYPE_MH = 1, 457 MVPP2_TAG_TYPE_DSA = 2, 458 MVPP2_TAG_TYPE_EDSA = 3, 459 MVPP2_TAG_TYPE_VLAN = 4, 460 MVPP2_TAG_TYPE_LAST = 5 461 }; 462 463 /* Parser constants */ 464 #define MVPP2_PRS_TCAM_SRAM_SIZE 256 465 #define MVPP2_PRS_TCAM_WORDS 6 466 #define MVPP2_PRS_SRAM_WORDS 4 467 #define MVPP2_PRS_FLOW_ID_SIZE 64 468 #define MVPP2_PRS_FLOW_ID_MASK 0x3f 469 #define MVPP2_PRS_TCAM_ENTRY_INVALID 1 470 #define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5) 471 #define MVPP2_PRS_IPV4_HEAD 0x40 472 #define MVPP2_PRS_IPV4_HEAD_MASK 0xf0 473 #define MVPP2_PRS_IPV4_MC 0xe0 474 #define MVPP2_PRS_IPV4_MC_MASK 0xf0 475 #define MVPP2_PRS_IPV4_BC_MASK 0xff 476 #define MVPP2_PRS_IPV4_IHL 0x5 477 #define MVPP2_PRS_IPV4_IHL_MASK 0xf 478 #define MVPP2_PRS_IPV6_MC 0xff 479 #define MVPP2_PRS_IPV6_MC_MASK 0xff 480 #define MVPP2_PRS_IPV6_HOP_MASK 0xff 481 #define MVPP2_PRS_TCAM_PROTO_MASK 0xff 482 #define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f 483 #define MVPP2_PRS_DBL_VLANS_MAX 100 484 485 /* Tcam structure: 486 * - lookup ID - 4 bits 487 * - port ID - 1 byte 488 * - additional information - 1 byte 489 * - header data - 8 bytes 490 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). 491 */ 492 #define MVPP2_PRS_AI_BITS 8 493 #define MVPP2_PRS_PORT_MASK 0xff 494 #define MVPP2_PRS_LU_MASK 0xf 495 #define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ 496 (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) 497 #define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ 498 (((offs) * 2) - ((offs) % 2) + 2) 499 #define MVPP2_PRS_TCAM_AI_BYTE 16 500 #define MVPP2_PRS_TCAM_PORT_BYTE 17 501 #define MVPP2_PRS_TCAM_LU_BYTE 20 502 #define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) 503 #define MVPP2_PRS_TCAM_INV_WORD 5 504 /* Tcam entries ID */ 505 #define MVPP2_PE_DROP_ALL 0 506 #define MVPP2_PE_FIRST_FREE_TID 1 507 #define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31) 508 #define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) 509 #define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29) 510 #define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) 511 #define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27) 512 #define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26) 513 #define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19) 514 #define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) 515 #define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) 516 #define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) 517 #define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) 518 #define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) 519 #define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13) 520 #define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12) 521 #define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11) 522 #define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10) 523 #define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9) 524 #define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8) 525 #define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7) 526 #define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6) 527 #define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5) 528 #define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4) 529 #define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3) 530 #define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) 531 #define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1) 532 533 /* Sram structure 534 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0). 535 */ 536 #define MVPP2_PRS_SRAM_RI_OFFS 0 537 #define MVPP2_PRS_SRAM_RI_WORD 0 538 #define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32 539 #define MVPP2_PRS_SRAM_RI_CTRL_WORD 1 540 #define MVPP2_PRS_SRAM_RI_CTRL_BITS 32 541 #define MVPP2_PRS_SRAM_SHIFT_OFFS 64 542 #define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72 543 #define MVPP2_PRS_SRAM_UDF_OFFS 73 544 #define MVPP2_PRS_SRAM_UDF_BITS 8 545 #define MVPP2_PRS_SRAM_UDF_MASK 0xff 546 #define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81 547 #define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82 548 #define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7 549 #define MVPP2_PRS_SRAM_UDF_TYPE_L3 1 550 #define MVPP2_PRS_SRAM_UDF_TYPE_L4 4 551 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85 552 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3 553 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1 554 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2 555 #define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3 556 #define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87 557 #define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2 558 #define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3 559 #define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0 560 #define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2 561 #define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3 562 #define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89 563 #define MVPP2_PRS_SRAM_AI_OFFS 90 564 #define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98 565 #define MVPP2_PRS_SRAM_AI_CTRL_BITS 8 566 #define MVPP2_PRS_SRAM_AI_MASK 0xff 567 #define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106 568 #define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf 569 #define MVPP2_PRS_SRAM_LU_DONE_BIT 110 570 #define MVPP2_PRS_SRAM_LU_GEN_BIT 111 571 572 /* Sram result info bits assignment */ 573 #define MVPP2_PRS_RI_MAC_ME_MASK 0x1 574 #define MVPP2_PRS_RI_DSA_MASK 0x2 575 #define MVPP2_PRS_RI_VLAN_MASK (BIT(2) | BIT(3)) 576 #define MVPP2_PRS_RI_VLAN_NONE 0x0 577 #define MVPP2_PRS_RI_VLAN_SINGLE BIT(2) 578 #define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3) 579 #define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3)) 580 #define MVPP2_PRS_RI_CPU_CODE_MASK 0x70 581 #define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4) 582 #define MVPP2_PRS_RI_L2_CAST_MASK (BIT(9) | BIT(10)) 583 #define MVPP2_PRS_RI_L2_UCAST 0x0 584 #define MVPP2_PRS_RI_L2_MCAST BIT(9) 585 #define MVPP2_PRS_RI_L2_BCAST BIT(10) 586 #define MVPP2_PRS_RI_PPPOE_MASK 0x800 587 #define MVPP2_PRS_RI_L3_PROTO_MASK (BIT(12) | BIT(13) | BIT(14)) 588 #define MVPP2_PRS_RI_L3_UN 0x0 589 #define MVPP2_PRS_RI_L3_IP4 BIT(12) 590 #define MVPP2_PRS_RI_L3_IP4_OPT BIT(13) 591 #define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13)) 592 #define MVPP2_PRS_RI_L3_IP6 BIT(14) 593 #define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14)) 594 #define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14)) 595 #define MVPP2_PRS_RI_L3_ADDR_MASK (BIT(15) | BIT(16)) 596 #define MVPP2_PRS_RI_L3_UCAST 0x0 597 #define MVPP2_PRS_RI_L3_MCAST BIT(15) 598 #define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) 599 #define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 600 #define MVPP2_PRS_RI_UDF3_MASK 0x300000 601 #define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) 602 #define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 603 #define MVPP2_PRS_RI_L4_TCP BIT(22) 604 #define MVPP2_PRS_RI_L4_UDP BIT(23) 605 #define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23)) 606 #define MVPP2_PRS_RI_UDF7_MASK 0x60000000 607 #define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29) 608 #define MVPP2_PRS_RI_DROP_MASK 0x80000000 609 610 /* Sram additional info bits assignment */ 611 #define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0) 612 #define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0) 613 #define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1) 614 #define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2) 615 #define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3) 616 #define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4) 617 #define MVPP2_PRS_SINGLE_VLAN_AI 0 618 #define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7) 619 620 /* DSA/EDSA type */ 621 #define MVPP2_PRS_TAGGED true 622 #define MVPP2_PRS_UNTAGGED false 623 #define MVPP2_PRS_EDSA true 624 #define MVPP2_PRS_DSA false 625 626 /* MAC entries, shadow udf */ 627 enum mvpp2_prs_udf { 628 MVPP2_PRS_UDF_MAC_DEF, 629 MVPP2_PRS_UDF_MAC_RANGE, 630 MVPP2_PRS_UDF_L2_DEF, 631 MVPP2_PRS_UDF_L2_DEF_COPY, 632 MVPP2_PRS_UDF_L2_USER, 633 }; 634 635 /* Lookup ID */ 636 enum mvpp2_prs_lookup { 637 MVPP2_PRS_LU_MH, 638 MVPP2_PRS_LU_MAC, 639 MVPP2_PRS_LU_DSA, 640 MVPP2_PRS_LU_VLAN, 641 MVPP2_PRS_LU_L2, 642 MVPP2_PRS_LU_PPPOE, 643 MVPP2_PRS_LU_IP4, 644 MVPP2_PRS_LU_IP6, 645 MVPP2_PRS_LU_FLOWS, 646 MVPP2_PRS_LU_LAST, 647 }; 648 649 /* L3 cast enum */ 650 enum mvpp2_prs_l3_cast { 651 MVPP2_PRS_L3_UNI_CAST, 652 MVPP2_PRS_L3_MULTI_CAST, 653 MVPP2_PRS_L3_BROAD_CAST 654 }; 655 656 /* Classifier constants */ 657 #define MVPP2_CLS_FLOWS_TBL_SIZE 512 658 #define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3 659 #define MVPP2_CLS_LKP_TBL_SIZE 64 660 661 /* BM constants */ 662 #define MVPP2_BM_POOLS_NUM 1 663 #define MVPP2_BM_LONG_BUF_NUM 16 664 #define MVPP2_BM_SHORT_BUF_NUM 16 665 #define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4) 666 #define MVPP2_BM_POOL_PTR_ALIGN 128 667 #define MVPP2_BM_SWF_LONG_POOL(port) 0 668 669 /* BM cookie (32 bits) definition */ 670 #define MVPP2_BM_COOKIE_POOL_OFFS 8 671 #define MVPP2_BM_COOKIE_CPU_OFFS 24 672 673 /* BM short pool packet size 674 * These value assure that for SWF the total number 675 * of bytes allocated for each buffer will be 512 676 */ 677 #define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512) 678 679 enum mvpp2_bm_type { 680 MVPP2_BM_FREE, 681 MVPP2_BM_SWF_LONG, 682 MVPP2_BM_SWF_SHORT 683 }; 684 685 /* Definitions */ 686 687 /* Shared Packet Processor resources */ 688 struct mvpp2 { 689 /* Shared registers' base addresses */ 690 void __iomem *base; 691 void __iomem *lms_base; 692 693 /* List of pointers to port structures */ 694 struct mvpp2_port **port_list; 695 696 /* Aggregated TXQs */ 697 struct mvpp2_tx_queue *aggr_txqs; 698 699 /* BM pools */ 700 struct mvpp2_bm_pool *bm_pools; 701 702 /* PRS shadow table */ 703 struct mvpp2_prs_shadow *prs_shadow; 704 /* PRS auxiliary table for double vlan entries control */ 705 bool *prs_double_vlans; 706 707 /* Tclk value */ 708 u32 tclk; 709 710 /* HW version */ 711 enum { MVPP21, MVPP22 } hw_version; 712 713 struct mii_dev *bus; 714 }; 715 716 struct mvpp2_pcpu_stats { 717 u64 rx_packets; 718 u64 rx_bytes; 719 u64 tx_packets; 720 u64 tx_bytes; 721 }; 722 723 struct mvpp2_port { 724 u8 id; 725 726 int irq; 727 728 struct mvpp2 *priv; 729 730 /* Per-port registers' base address */ 731 void __iomem *base; 732 733 struct mvpp2_rx_queue **rxqs; 734 struct mvpp2_tx_queue **txqs; 735 736 int pkt_size; 737 738 u32 pending_cause_rx; 739 740 /* Per-CPU port control */ 741 struct mvpp2_port_pcpu __percpu *pcpu; 742 743 /* Flags */ 744 unsigned long flags; 745 746 u16 tx_ring_size; 747 u16 rx_ring_size; 748 struct mvpp2_pcpu_stats __percpu *stats; 749 750 struct phy_device *phy_dev; 751 phy_interface_t phy_interface; 752 int phy_node; 753 int phyaddr; 754 int init; 755 unsigned int link; 756 unsigned int duplex; 757 unsigned int speed; 758 759 struct mvpp2_bm_pool *pool_long; 760 struct mvpp2_bm_pool *pool_short; 761 762 /* Index of first port's physical RXQ */ 763 u8 first_rxq; 764 765 u8 dev_addr[ETH_ALEN]; 766 }; 767 768 /* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the 769 * layout of the transmit and reception DMA descriptors, and their 770 * layout is therefore defined by the hardware design 771 */ 772 773 #define MVPP2_TXD_L3_OFF_SHIFT 0 774 #define MVPP2_TXD_IP_HLEN_SHIFT 8 775 #define MVPP2_TXD_L4_CSUM_FRAG BIT(13) 776 #define MVPP2_TXD_L4_CSUM_NOT BIT(14) 777 #define MVPP2_TXD_IP_CSUM_DISABLE BIT(15) 778 #define MVPP2_TXD_PADDING_DISABLE BIT(23) 779 #define MVPP2_TXD_L4_UDP BIT(24) 780 #define MVPP2_TXD_L3_IP6 BIT(26) 781 #define MVPP2_TXD_L_DESC BIT(28) 782 #define MVPP2_TXD_F_DESC BIT(29) 783 784 #define MVPP2_RXD_ERR_SUMMARY BIT(15) 785 #define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14)) 786 #define MVPP2_RXD_ERR_CRC 0x0 787 #define MVPP2_RXD_ERR_OVERRUN BIT(13) 788 #define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14)) 789 #define MVPP2_RXD_BM_POOL_ID_OFFS 16 790 #define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18)) 791 #define MVPP2_RXD_HWF_SYNC BIT(21) 792 #define MVPP2_RXD_L4_CSUM_OK BIT(22) 793 #define MVPP2_RXD_IP4_HEADER_ERR BIT(24) 794 #define MVPP2_RXD_L4_TCP BIT(25) 795 #define MVPP2_RXD_L4_UDP BIT(26) 796 #define MVPP2_RXD_L3_IP4 BIT(28) 797 #define MVPP2_RXD_L3_IP6 BIT(30) 798 #define MVPP2_RXD_BUF_HDR BIT(31) 799 800 /* HW TX descriptor for PPv2.1 */ 801 struct mvpp21_tx_desc { 802 u32 command; /* Options used by HW for packet transmitting.*/ 803 u8 packet_offset; /* the offset from the buffer beginning */ 804 u8 phys_txq; /* destination queue ID */ 805 u16 data_size; /* data size of transmitted packet in bytes */ 806 u32 buf_dma_addr; /* physical addr of transmitted buffer */ 807 u32 buf_cookie; /* cookie for access to TX buffer in tx path */ 808 u32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ 809 u32 reserved2; /* reserved (for future use) */ 810 }; 811 812 /* HW RX descriptor for PPv2.1 */ 813 struct mvpp21_rx_desc { 814 u32 status; /* info about received packet */ 815 u16 reserved1; /* parser_info (for future use, PnC) */ 816 u16 data_size; /* size of received packet in bytes */ 817 u32 buf_dma_addr; /* physical address of the buffer */ 818 u32 buf_cookie; /* cookie for access to RX buffer in rx path */ 819 u16 reserved2; /* gem_port_id (for future use, PON) */ 820 u16 reserved3; /* csum_l4 (for future use, PnC) */ 821 u8 reserved4; /* bm_qset (for future use, BM) */ 822 u8 reserved5; 823 u16 reserved6; /* classify_info (for future use, PnC) */ 824 u32 reserved7; /* flow_id (for future use, PnC) */ 825 u32 reserved8; 826 }; 827 828 /* Opaque type used by the driver to manipulate the HW TX and RX 829 * descriptors 830 */ 831 struct mvpp2_tx_desc { 832 union { 833 struct mvpp21_tx_desc pp21; 834 }; 835 }; 836 837 struct mvpp2_rx_desc { 838 union { 839 struct mvpp21_rx_desc pp21; 840 }; 841 }; 842 843 /* Per-CPU Tx queue control */ 844 struct mvpp2_txq_pcpu { 845 int cpu; 846 847 /* Number of Tx DMA descriptors in the descriptor ring */ 848 int size; 849 850 /* Number of currently used Tx DMA descriptor in the 851 * descriptor ring 852 */ 853 int count; 854 855 /* Number of Tx DMA descriptors reserved for each CPU */ 856 int reserved_num; 857 858 /* Index of last TX DMA descriptor that was inserted */ 859 int txq_put_index; 860 861 /* Index of the TX DMA descriptor to be cleaned up */ 862 int txq_get_index; 863 }; 864 865 struct mvpp2_tx_queue { 866 /* Physical number of this Tx queue */ 867 u8 id; 868 869 /* Logical number of this Tx queue */ 870 u8 log_id; 871 872 /* Number of Tx DMA descriptors in the descriptor ring */ 873 int size; 874 875 /* Number of currently used Tx DMA descriptor in the descriptor ring */ 876 int count; 877 878 /* Per-CPU control of physical Tx queues */ 879 struct mvpp2_txq_pcpu __percpu *pcpu; 880 881 u32 done_pkts_coal; 882 883 /* Virtual address of thex Tx DMA descriptors array */ 884 struct mvpp2_tx_desc *descs; 885 886 /* DMA address of the Tx DMA descriptors array */ 887 dma_addr_t descs_dma; 888 889 /* Index of the last Tx DMA descriptor */ 890 int last_desc; 891 892 /* Index of the next Tx DMA descriptor to process */ 893 int next_desc_to_proc; 894 }; 895 896 struct mvpp2_rx_queue { 897 /* RX queue number, in the range 0-31 for physical RXQs */ 898 u8 id; 899 900 /* Num of rx descriptors in the rx descriptor ring */ 901 int size; 902 903 u32 pkts_coal; 904 u32 time_coal; 905 906 /* Virtual address of the RX DMA descriptors array */ 907 struct mvpp2_rx_desc *descs; 908 909 /* DMA address of the RX DMA descriptors array */ 910 dma_addr_t descs_dma; 911 912 /* Index of the last RX DMA descriptor */ 913 int last_desc; 914 915 /* Index of the next RX DMA descriptor to process */ 916 int next_desc_to_proc; 917 918 /* ID of port to which physical RXQ is mapped */ 919 int port; 920 921 /* Port's logic RXQ number to which physical RXQ is mapped */ 922 int logic_rxq; 923 }; 924 925 union mvpp2_prs_tcam_entry { 926 u32 word[MVPP2_PRS_TCAM_WORDS]; 927 u8 byte[MVPP2_PRS_TCAM_WORDS * 4]; 928 }; 929 930 union mvpp2_prs_sram_entry { 931 u32 word[MVPP2_PRS_SRAM_WORDS]; 932 u8 byte[MVPP2_PRS_SRAM_WORDS * 4]; 933 }; 934 935 struct mvpp2_prs_entry { 936 u32 index; 937 union mvpp2_prs_tcam_entry tcam; 938 union mvpp2_prs_sram_entry sram; 939 }; 940 941 struct mvpp2_prs_shadow { 942 bool valid; 943 bool finish; 944 945 /* Lookup ID */ 946 int lu; 947 948 /* User defined offset */ 949 int udf; 950 951 /* Result info */ 952 u32 ri; 953 u32 ri_mask; 954 }; 955 956 struct mvpp2_cls_flow_entry { 957 u32 index; 958 u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS]; 959 }; 960 961 struct mvpp2_cls_lookup_entry { 962 u32 lkpid; 963 u32 way; 964 u32 data; 965 }; 966 967 struct mvpp2_bm_pool { 968 /* Pool number in the range 0-7 */ 969 int id; 970 enum mvpp2_bm_type type; 971 972 /* Buffer Pointers Pool External (BPPE) size */ 973 int size; 974 /* Number of buffers for this pool */ 975 int buf_num; 976 /* Pool buffer size */ 977 int buf_size; 978 /* Packet size */ 979 int pkt_size; 980 981 /* BPPE virtual base address */ 982 unsigned long *virt_addr; 983 /* BPPE DMA base address */ 984 dma_addr_t dma_addr; 985 986 /* Ports using BM pool */ 987 u32 port_map; 988 989 /* Occupied buffers indicator */ 990 int in_use_thresh; 991 }; 992 993 /* Static declaractions */ 994 995 /* Number of RXQs used by single port */ 996 static int rxq_number = MVPP2_DEFAULT_RXQ; 997 /* Number of TXQs used by single port */ 998 static int txq_number = MVPP2_DEFAULT_TXQ; 999 1000 #define MVPP2_DRIVER_NAME "mvpp2" 1001 #define MVPP2_DRIVER_VERSION "1.0" 1002 1003 /* 1004 * U-Boot internal data, mostly uncached buffers for descriptors and data 1005 */ 1006 struct buffer_location { 1007 struct mvpp2_tx_desc *aggr_tx_descs; 1008 struct mvpp2_tx_desc *tx_descs; 1009 struct mvpp2_rx_desc *rx_descs; 1010 unsigned long *bm_pool[MVPP2_BM_POOLS_NUM]; 1011 unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM]; 1012 int first_rxq; 1013 }; 1014 1015 /* 1016 * All 4 interfaces use the same global buffer, since only one interface 1017 * can be enabled at once 1018 */ 1019 static struct buffer_location buffer_loc; 1020 1021 /* 1022 * Page table entries are set to 1MB, or multiples of 1MB 1023 * (not < 1MB). driver uses less bd's so use 1MB bdspace. 1024 */ 1025 #define BD_SPACE (1 << 20) 1026 1027 /* Utility/helper methods */ 1028 1029 static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) 1030 { 1031 writel(data, priv->base + offset); 1032 } 1033 1034 static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) 1035 { 1036 return readl(priv->base + offset); 1037 } 1038 1039 static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, 1040 struct mvpp2_tx_desc *tx_desc, 1041 dma_addr_t dma_addr) 1042 { 1043 tx_desc->pp21.buf_dma_addr = dma_addr; 1044 } 1045 1046 static void mvpp2_txdesc_size_set(struct mvpp2_port *port, 1047 struct mvpp2_tx_desc *tx_desc, 1048 size_t size) 1049 { 1050 tx_desc->pp21.data_size = size; 1051 } 1052 1053 static void mvpp2_txdesc_txq_set(struct mvpp2_port *port, 1054 struct mvpp2_tx_desc *tx_desc, 1055 unsigned int txq) 1056 { 1057 tx_desc->pp21.phys_txq = txq; 1058 } 1059 1060 static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port, 1061 struct mvpp2_tx_desc *tx_desc, 1062 unsigned int command) 1063 { 1064 tx_desc->pp21.command = command; 1065 } 1066 1067 static void mvpp2_txdesc_offset_set(struct mvpp2_port *port, 1068 struct mvpp2_tx_desc *tx_desc, 1069 unsigned int offset) 1070 { 1071 tx_desc->pp21.packet_offset = offset; 1072 } 1073 1074 static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, 1075 struct mvpp2_rx_desc *rx_desc) 1076 { 1077 return rx_desc->pp21.buf_dma_addr; 1078 } 1079 1080 static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, 1081 struct mvpp2_rx_desc *rx_desc) 1082 { 1083 return rx_desc->pp21.buf_cookie; 1084 } 1085 1086 static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, 1087 struct mvpp2_rx_desc *rx_desc) 1088 { 1089 return rx_desc->pp21.data_size; 1090 } 1091 1092 static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port, 1093 struct mvpp2_rx_desc *rx_desc) 1094 { 1095 return rx_desc->pp21.status; 1096 } 1097 1098 static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu) 1099 { 1100 txq_pcpu->txq_get_index++; 1101 if (txq_pcpu->txq_get_index == txq_pcpu->size) 1102 txq_pcpu->txq_get_index = 0; 1103 } 1104 1105 /* Get number of physical egress port */ 1106 static inline int mvpp2_egress_port(struct mvpp2_port *port) 1107 { 1108 return MVPP2_MAX_TCONT + port->id; 1109 } 1110 1111 /* Get number of physical TXQ */ 1112 static inline int mvpp2_txq_phys(int port, int txq) 1113 { 1114 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq; 1115 } 1116 1117 /* Parser configuration routines */ 1118 1119 /* Update parser tcam and sram hw entries */ 1120 static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) 1121 { 1122 int i; 1123 1124 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) 1125 return -EINVAL; 1126 1127 /* Clear entry invalidation bit */ 1128 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; 1129 1130 /* Write tcam index - indirect access */ 1131 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); 1132 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) 1133 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]); 1134 1135 /* Write sram index - indirect access */ 1136 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); 1137 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) 1138 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]); 1139 1140 return 0; 1141 } 1142 1143 /* Read tcam entry from hw */ 1144 static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) 1145 { 1146 int i; 1147 1148 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) 1149 return -EINVAL; 1150 1151 /* Write tcam index - indirect access */ 1152 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); 1153 1154 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv, 1155 MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); 1156 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) 1157 return MVPP2_PRS_TCAM_ENTRY_INVALID; 1158 1159 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) 1160 pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i)); 1161 1162 /* Write sram index - indirect access */ 1163 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); 1164 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) 1165 pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i)); 1166 1167 return 0; 1168 } 1169 1170 /* Invalidate tcam hw entry */ 1171 static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) 1172 { 1173 /* Write index - indirect access */ 1174 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); 1175 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD), 1176 MVPP2_PRS_TCAM_INV_MASK); 1177 } 1178 1179 /* Enable shadow table entry and set its lookup ID */ 1180 static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu) 1181 { 1182 priv->prs_shadow[index].valid = true; 1183 priv->prs_shadow[index].lu = lu; 1184 } 1185 1186 /* Update ri fields in shadow table entry */ 1187 static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index, 1188 unsigned int ri, unsigned int ri_mask) 1189 { 1190 priv->prs_shadow[index].ri_mask = ri_mask; 1191 priv->prs_shadow[index].ri = ri; 1192 } 1193 1194 /* Update lookup field in tcam sw entry */ 1195 static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu) 1196 { 1197 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE); 1198 1199 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu; 1200 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK; 1201 } 1202 1203 /* Update mask for single port in tcam sw entry */ 1204 static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, 1205 unsigned int port, bool add) 1206 { 1207 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); 1208 1209 if (add) 1210 pe->tcam.byte[enable_off] &= ~(1 << port); 1211 else 1212 pe->tcam.byte[enable_off] |= 1 << port; 1213 } 1214 1215 /* Update port map in tcam sw entry */ 1216 static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, 1217 unsigned int ports) 1218 { 1219 unsigned char port_mask = MVPP2_PRS_PORT_MASK; 1220 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); 1221 1222 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0; 1223 pe->tcam.byte[enable_off] &= ~port_mask; 1224 pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK; 1225 } 1226 1227 /* Obtain port map from tcam sw entry */ 1228 static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe) 1229 { 1230 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE); 1231 1232 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK; 1233 } 1234 1235 /* Set byte of data and its enable bits in tcam sw entry */ 1236 static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, 1237 unsigned int offs, unsigned char byte, 1238 unsigned char enable) 1239 { 1240 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte; 1241 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable; 1242 } 1243 1244 /* Get byte of data and its enable bits from tcam sw entry */ 1245 static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, 1246 unsigned int offs, unsigned char *byte, 1247 unsigned char *enable) 1248 { 1249 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)]; 1250 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)]; 1251 } 1252 1253 /* Set ethertype in tcam sw entry */ 1254 static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset, 1255 unsigned short ethertype) 1256 { 1257 mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff); 1258 mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff); 1259 } 1260 1261 /* Set bits in sram sw entry */ 1262 static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num, 1263 int val) 1264 { 1265 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8)); 1266 } 1267 1268 /* Clear bits in sram sw entry */ 1269 static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num, 1270 int val) 1271 { 1272 pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8)); 1273 } 1274 1275 /* Update ri bits in sram sw entry */ 1276 static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, 1277 unsigned int bits, unsigned int mask) 1278 { 1279 unsigned int i; 1280 1281 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { 1282 int ri_off = MVPP2_PRS_SRAM_RI_OFFS; 1283 1284 if (!(mask & BIT(i))) 1285 continue; 1286 1287 if (bits & BIT(i)) 1288 mvpp2_prs_sram_bits_set(pe, ri_off + i, 1); 1289 else 1290 mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1); 1291 1292 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1); 1293 } 1294 } 1295 1296 /* Update ai bits in sram sw entry */ 1297 static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, 1298 unsigned int bits, unsigned int mask) 1299 { 1300 unsigned int i; 1301 int ai_off = MVPP2_PRS_SRAM_AI_OFFS; 1302 1303 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { 1304 1305 if (!(mask & BIT(i))) 1306 continue; 1307 1308 if (bits & BIT(i)) 1309 mvpp2_prs_sram_bits_set(pe, ai_off + i, 1); 1310 else 1311 mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1); 1312 1313 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1); 1314 } 1315 } 1316 1317 /* Read ai bits from sram sw entry */ 1318 static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe) 1319 { 1320 u8 bits; 1321 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS); 1322 int ai_en_off = ai_off + 1; 1323 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8; 1324 1325 bits = (pe->sram.byte[ai_off] >> ai_shift) | 1326 (pe->sram.byte[ai_en_off] << (8 - ai_shift)); 1327 1328 return bits; 1329 } 1330 1331 /* In sram sw entry set lookup ID field of the tcam key to be used in the next 1332 * lookup interation 1333 */ 1334 static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, 1335 unsigned int lu) 1336 { 1337 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS; 1338 1339 mvpp2_prs_sram_bits_clear(pe, sram_next_off, 1340 MVPP2_PRS_SRAM_NEXT_LU_MASK); 1341 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu); 1342 } 1343 1344 /* In the sram sw entry set sign and value of the next lookup offset 1345 * and the offset value generated to the classifier 1346 */ 1347 static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, 1348 unsigned int op) 1349 { 1350 /* Set sign */ 1351 if (shift < 0) { 1352 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); 1353 shift = 0 - shift; 1354 } else { 1355 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); 1356 } 1357 1358 /* Set value */ 1359 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] = 1360 (unsigned char)shift; 1361 1362 /* Reset and set operation */ 1363 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, 1364 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK); 1365 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op); 1366 1367 /* Set base offset as current */ 1368 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); 1369 } 1370 1371 /* In the sram sw entry set sign and value of the user defined offset 1372 * generated to the classifier 1373 */ 1374 static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, 1375 unsigned int type, int offset, 1376 unsigned int op) 1377 { 1378 /* Set sign */ 1379 if (offset < 0) { 1380 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); 1381 offset = 0 - offset; 1382 } else { 1383 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); 1384 } 1385 1386 /* Set value */ 1387 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS, 1388 MVPP2_PRS_SRAM_UDF_MASK); 1389 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset); 1390 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + 1391 MVPP2_PRS_SRAM_UDF_BITS)] &= 1392 ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); 1393 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + 1394 MVPP2_PRS_SRAM_UDF_BITS)] |= 1395 (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8))); 1396 1397 /* Set offset type */ 1398 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, 1399 MVPP2_PRS_SRAM_UDF_TYPE_MASK); 1400 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type); 1401 1402 /* Set offset operation */ 1403 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, 1404 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); 1405 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op); 1406 1407 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + 1408 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &= 1409 ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> 1410 (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); 1411 1412 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + 1413 MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |= 1414 (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8))); 1415 1416 /* Set base offset as current */ 1417 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); 1418 } 1419 1420 /* Find parser flow entry */ 1421 static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) 1422 { 1423 struct mvpp2_prs_entry *pe; 1424 int tid; 1425 1426 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 1427 if (!pe) 1428 return NULL; 1429 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); 1430 1431 /* Go through the all entires with MVPP2_PRS_LU_FLOWS */ 1432 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) { 1433 u8 bits; 1434 1435 if (!priv->prs_shadow[tid].valid || 1436 priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) 1437 continue; 1438 1439 pe->index = tid; 1440 mvpp2_prs_hw_read(priv, pe); 1441 bits = mvpp2_prs_sram_ai_get(pe); 1442 1443 /* Sram store classification lookup ID in AI bits [5:0] */ 1444 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow) 1445 return pe; 1446 } 1447 kfree(pe); 1448 1449 return NULL; 1450 } 1451 1452 /* Return first free tcam index, seeking from start to end */ 1453 static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start, 1454 unsigned char end) 1455 { 1456 int tid; 1457 1458 if (start > end) 1459 swap(start, end); 1460 1461 if (end >= MVPP2_PRS_TCAM_SRAM_SIZE) 1462 end = MVPP2_PRS_TCAM_SRAM_SIZE - 1; 1463 1464 for (tid = start; tid <= end; tid++) { 1465 if (!priv->prs_shadow[tid].valid) 1466 return tid; 1467 } 1468 1469 return -EINVAL; 1470 } 1471 1472 /* Enable/disable dropping all mac da's */ 1473 static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) 1474 { 1475 struct mvpp2_prs_entry pe; 1476 1477 if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { 1478 /* Entry exist - update port only */ 1479 pe.index = MVPP2_PE_DROP_ALL; 1480 mvpp2_prs_hw_read(priv, &pe); 1481 } else { 1482 /* Entry doesn't exist - create new */ 1483 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1484 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); 1485 pe.index = MVPP2_PE_DROP_ALL; 1486 1487 /* Non-promiscuous mode for all ports - DROP unknown packets */ 1488 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, 1489 MVPP2_PRS_RI_DROP_MASK); 1490 1491 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 1492 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 1493 1494 /* Update shadow table */ 1495 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 1496 1497 /* Mask all ports */ 1498 mvpp2_prs_tcam_port_map_set(&pe, 0); 1499 } 1500 1501 /* Update port mask */ 1502 mvpp2_prs_tcam_port_set(&pe, port, add); 1503 1504 mvpp2_prs_hw_write(priv, &pe); 1505 } 1506 1507 /* Set port to promiscuous mode */ 1508 static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add) 1509 { 1510 struct mvpp2_prs_entry pe; 1511 1512 /* Promiscuous mode - Accept unknown packets */ 1513 1514 if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) { 1515 /* Entry exist - update port only */ 1516 pe.index = MVPP2_PE_MAC_PROMISCUOUS; 1517 mvpp2_prs_hw_read(priv, &pe); 1518 } else { 1519 /* Entry doesn't exist - create new */ 1520 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1521 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); 1522 pe.index = MVPP2_PE_MAC_PROMISCUOUS; 1523 1524 /* Continue - set next lookup */ 1525 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); 1526 1527 /* Set result info bits */ 1528 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST, 1529 MVPP2_PRS_RI_L2_CAST_MASK); 1530 1531 /* Shift to ethertype */ 1532 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, 1533 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1534 1535 /* Mask all ports */ 1536 mvpp2_prs_tcam_port_map_set(&pe, 0); 1537 1538 /* Update shadow table */ 1539 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 1540 } 1541 1542 /* Update port mask */ 1543 mvpp2_prs_tcam_port_set(&pe, port, add); 1544 1545 mvpp2_prs_hw_write(priv, &pe); 1546 } 1547 1548 /* Accept multicast */ 1549 static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index, 1550 bool add) 1551 { 1552 struct mvpp2_prs_entry pe; 1553 unsigned char da_mc; 1554 1555 /* Ethernet multicast address first byte is 1556 * 0x01 for IPv4 and 0x33 for IPv6 1557 */ 1558 da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33; 1559 1560 if (priv->prs_shadow[index].valid) { 1561 /* Entry exist - update port only */ 1562 pe.index = index; 1563 mvpp2_prs_hw_read(priv, &pe); 1564 } else { 1565 /* Entry doesn't exist - create new */ 1566 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1567 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); 1568 pe.index = index; 1569 1570 /* Continue - set next lookup */ 1571 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA); 1572 1573 /* Set result info bits */ 1574 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST, 1575 MVPP2_PRS_RI_L2_CAST_MASK); 1576 1577 /* Update tcam entry data first byte */ 1578 mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff); 1579 1580 /* Shift to ethertype */ 1581 mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN, 1582 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1583 1584 /* Mask all ports */ 1585 mvpp2_prs_tcam_port_map_set(&pe, 0); 1586 1587 /* Update shadow table */ 1588 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 1589 } 1590 1591 /* Update port mask */ 1592 mvpp2_prs_tcam_port_set(&pe, port, add); 1593 1594 mvpp2_prs_hw_write(priv, &pe); 1595 } 1596 1597 /* Parser per-port initialization */ 1598 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first, 1599 int lu_max, int offset) 1600 { 1601 u32 val; 1602 1603 /* Set lookup ID */ 1604 val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG); 1605 val &= ~MVPP2_PRS_PORT_LU_MASK(port); 1606 val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first); 1607 mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val); 1608 1609 /* Set maximum number of loops for packet received from port */ 1610 val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port)); 1611 val &= ~MVPP2_PRS_MAX_LOOP_MASK(port); 1612 val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max); 1613 mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val); 1614 1615 /* Set initial offset for packet header extraction for the first 1616 * searching loop 1617 */ 1618 val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port)); 1619 val &= ~MVPP2_PRS_INIT_OFF_MASK(port); 1620 val |= MVPP2_PRS_INIT_OFF_VAL(port, offset); 1621 mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val); 1622 } 1623 1624 /* Default flow entries initialization for all ports */ 1625 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv) 1626 { 1627 struct mvpp2_prs_entry pe; 1628 int port; 1629 1630 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 1631 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1632 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 1633 pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port; 1634 1635 /* Mask all ports */ 1636 mvpp2_prs_tcam_port_map_set(&pe, 0); 1637 1638 /* Set flow ID*/ 1639 mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK); 1640 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); 1641 1642 /* Update shadow table and hw entry */ 1643 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS); 1644 mvpp2_prs_hw_write(priv, &pe); 1645 } 1646 } 1647 1648 /* Set default entry for Marvell Header field */ 1649 static void mvpp2_prs_mh_init(struct mvpp2 *priv) 1650 { 1651 struct mvpp2_prs_entry pe; 1652 1653 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1654 1655 pe.index = MVPP2_PE_MH_DEFAULT; 1656 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH); 1657 mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE, 1658 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1659 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC); 1660 1661 /* Unmask all ports */ 1662 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 1663 1664 /* Update shadow table and hw entry */ 1665 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH); 1666 mvpp2_prs_hw_write(priv, &pe); 1667 } 1668 1669 /* Set default entires (place holder) for promiscuous, non-promiscuous and 1670 * multicast MAC addresses 1671 */ 1672 static void mvpp2_prs_mac_init(struct mvpp2 *priv) 1673 { 1674 struct mvpp2_prs_entry pe; 1675 1676 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1677 1678 /* Non-promiscuous mode for all ports - DROP unknown packets */ 1679 pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS; 1680 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); 1681 1682 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK, 1683 MVPP2_PRS_RI_DROP_MASK); 1684 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 1685 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 1686 1687 /* Unmask all ports */ 1688 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 1689 1690 /* Update shadow table and hw entry */ 1691 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC); 1692 mvpp2_prs_hw_write(priv, &pe); 1693 1694 /* place holders only - no ports */ 1695 mvpp2_prs_mac_drop_all_set(priv, 0, false); 1696 mvpp2_prs_mac_promisc_set(priv, 0, false); 1697 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false); 1698 mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false); 1699 } 1700 1701 /* Match basic ethertypes */ 1702 static int mvpp2_prs_etype_init(struct mvpp2 *priv) 1703 { 1704 struct mvpp2_prs_entry pe; 1705 int tid; 1706 1707 /* Ethertype: PPPoE */ 1708 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 1709 MVPP2_PE_LAST_FREE_TID); 1710 if (tid < 0) 1711 return tid; 1712 1713 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1714 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 1715 pe.index = tid; 1716 1717 mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES); 1718 1719 mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE, 1720 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1721 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE); 1722 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK, 1723 MVPP2_PRS_RI_PPPOE_MASK); 1724 1725 /* Update shadow table and hw entry */ 1726 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 1727 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 1728 priv->prs_shadow[pe.index].finish = false; 1729 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK, 1730 MVPP2_PRS_RI_PPPOE_MASK); 1731 mvpp2_prs_hw_write(priv, &pe); 1732 1733 /* Ethertype: ARP */ 1734 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 1735 MVPP2_PE_LAST_FREE_TID); 1736 if (tid < 0) 1737 return tid; 1738 1739 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1740 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 1741 pe.index = tid; 1742 1743 mvpp2_prs_match_etype(&pe, 0, PROT_ARP); 1744 1745 /* Generate flow in the next iteration*/ 1746 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 1747 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 1748 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP, 1749 MVPP2_PRS_RI_L3_PROTO_MASK); 1750 /* Set L3 offset */ 1751 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 1752 MVPP2_ETH_TYPE_LEN, 1753 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 1754 1755 /* Update shadow table and hw entry */ 1756 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 1757 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 1758 priv->prs_shadow[pe.index].finish = true; 1759 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP, 1760 MVPP2_PRS_RI_L3_PROTO_MASK); 1761 mvpp2_prs_hw_write(priv, &pe); 1762 1763 /* Ethertype: LBTD */ 1764 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 1765 MVPP2_PE_LAST_FREE_TID); 1766 if (tid < 0) 1767 return tid; 1768 1769 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1770 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 1771 pe.index = tid; 1772 1773 mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE); 1774 1775 /* Generate flow in the next iteration*/ 1776 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 1777 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 1778 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | 1779 MVPP2_PRS_RI_UDF3_RX_SPECIAL, 1780 MVPP2_PRS_RI_CPU_CODE_MASK | 1781 MVPP2_PRS_RI_UDF3_MASK); 1782 /* Set L3 offset */ 1783 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 1784 MVPP2_ETH_TYPE_LEN, 1785 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 1786 1787 /* Update shadow table and hw entry */ 1788 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 1789 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 1790 priv->prs_shadow[pe.index].finish = true; 1791 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC | 1792 MVPP2_PRS_RI_UDF3_RX_SPECIAL, 1793 MVPP2_PRS_RI_CPU_CODE_MASK | 1794 MVPP2_PRS_RI_UDF3_MASK); 1795 mvpp2_prs_hw_write(priv, &pe); 1796 1797 /* Ethertype: IPv4 without options */ 1798 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 1799 MVPP2_PE_LAST_FREE_TID); 1800 if (tid < 0) 1801 return tid; 1802 1803 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1804 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 1805 pe.index = tid; 1806 1807 mvpp2_prs_match_etype(&pe, 0, PROT_IP); 1808 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, 1809 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, 1810 MVPP2_PRS_IPV4_HEAD_MASK | 1811 MVPP2_PRS_IPV4_IHL_MASK); 1812 1813 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); 1814 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, 1815 MVPP2_PRS_RI_L3_PROTO_MASK); 1816 /* Skip eth_type + 4 bytes of IP header */ 1817 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, 1818 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1819 /* Set L3 offset */ 1820 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 1821 MVPP2_ETH_TYPE_LEN, 1822 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 1823 1824 /* Update shadow table and hw entry */ 1825 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 1826 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 1827 priv->prs_shadow[pe.index].finish = false; 1828 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4, 1829 MVPP2_PRS_RI_L3_PROTO_MASK); 1830 mvpp2_prs_hw_write(priv, &pe); 1831 1832 /* Ethertype: IPv4 with options */ 1833 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 1834 MVPP2_PE_LAST_FREE_TID); 1835 if (tid < 0) 1836 return tid; 1837 1838 pe.index = tid; 1839 1840 /* Clear tcam data before updating */ 1841 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; 1842 pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; 1843 1844 mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, 1845 MVPP2_PRS_IPV4_HEAD, 1846 MVPP2_PRS_IPV4_HEAD_MASK); 1847 1848 /* Clear ri before updating */ 1849 pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; 1850 pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; 1851 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT, 1852 MVPP2_PRS_RI_L3_PROTO_MASK); 1853 1854 /* Update shadow table and hw entry */ 1855 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 1856 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 1857 priv->prs_shadow[pe.index].finish = false; 1858 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT, 1859 MVPP2_PRS_RI_L3_PROTO_MASK); 1860 mvpp2_prs_hw_write(priv, &pe); 1861 1862 /* Ethertype: IPv6 without options */ 1863 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 1864 MVPP2_PE_LAST_FREE_TID); 1865 if (tid < 0) 1866 return tid; 1867 1868 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1869 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 1870 pe.index = tid; 1871 1872 mvpp2_prs_match_etype(&pe, 0, PROT_IPV6); 1873 1874 /* Skip DIP of IPV6 header */ 1875 mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 + 1876 MVPP2_MAX_L3_ADDR_SIZE, 1877 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1878 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); 1879 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, 1880 MVPP2_PRS_RI_L3_PROTO_MASK); 1881 /* Set L3 offset */ 1882 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 1883 MVPP2_ETH_TYPE_LEN, 1884 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 1885 1886 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 1887 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 1888 priv->prs_shadow[pe.index].finish = false; 1889 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6, 1890 MVPP2_PRS_RI_L3_PROTO_MASK); 1891 mvpp2_prs_hw_write(priv, &pe); 1892 1893 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */ 1894 memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); 1895 mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2); 1896 pe.index = MVPP2_PE_ETH_TYPE_UN; 1897 1898 /* Unmask all ports */ 1899 mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); 1900 1901 /* Generate flow in the next iteration*/ 1902 mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 1903 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); 1904 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, 1905 MVPP2_PRS_RI_L3_PROTO_MASK); 1906 /* Set L3 offset even it's unknown L3 */ 1907 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, 1908 MVPP2_ETH_TYPE_LEN, 1909 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 1910 1911 /* Update shadow table and hw entry */ 1912 mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2); 1913 priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF; 1914 priv->prs_shadow[pe.index].finish = true; 1915 mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN, 1916 MVPP2_PRS_RI_L3_PROTO_MASK); 1917 mvpp2_prs_hw_write(priv, &pe); 1918 1919 return 0; 1920 } 1921 1922 /* Parser default initialization */ 1923 static int mvpp2_prs_default_init(struct udevice *dev, 1924 struct mvpp2 *priv) 1925 { 1926 int err, index, i; 1927 1928 /* Enable tcam table */ 1929 mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); 1930 1931 /* Clear all tcam and sram entries */ 1932 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) { 1933 mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); 1934 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) 1935 mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0); 1936 1937 mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index); 1938 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) 1939 mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0); 1940 } 1941 1942 /* Invalidate all tcam entries */ 1943 for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) 1944 mvpp2_prs_hw_inv(priv, index); 1945 1946 priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE, 1947 sizeof(struct mvpp2_prs_shadow), 1948 GFP_KERNEL); 1949 if (!priv->prs_shadow) 1950 return -ENOMEM; 1951 1952 /* Always start from lookup = 0 */ 1953 for (index = 0; index < MVPP2_MAX_PORTS; index++) 1954 mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, 1955 MVPP2_PRS_PORT_LU_MAX, 0); 1956 1957 mvpp2_prs_def_flow_init(priv); 1958 1959 mvpp2_prs_mh_init(priv); 1960 1961 mvpp2_prs_mac_init(priv); 1962 1963 err = mvpp2_prs_etype_init(priv); 1964 if (err) 1965 return err; 1966 1967 return 0; 1968 } 1969 1970 /* Compare MAC DA with tcam entry data */ 1971 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, 1972 const u8 *da, unsigned char *mask) 1973 { 1974 unsigned char tcam_byte, tcam_mask; 1975 int index; 1976 1977 for (index = 0; index < ETH_ALEN; index++) { 1978 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask); 1979 if (tcam_mask != mask[index]) 1980 return false; 1981 1982 if ((tcam_mask & tcam_byte) != (da[index] & mask[index])) 1983 return false; 1984 } 1985 1986 return true; 1987 } 1988 1989 /* Find tcam entry with matched pair <MAC DA, port> */ 1990 static struct mvpp2_prs_entry * 1991 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, 1992 unsigned char *mask, int udf_type) 1993 { 1994 struct mvpp2_prs_entry *pe; 1995 int tid; 1996 1997 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 1998 if (!pe) 1999 return NULL; 2000 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 2001 2002 /* Go through the all entires with MVPP2_PRS_LU_MAC */ 2003 for (tid = MVPP2_PE_FIRST_FREE_TID; 2004 tid <= MVPP2_PE_LAST_FREE_TID; tid++) { 2005 unsigned int entry_pmap; 2006 2007 if (!priv->prs_shadow[tid].valid || 2008 (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || 2009 (priv->prs_shadow[tid].udf != udf_type)) 2010 continue; 2011 2012 pe->index = tid; 2013 mvpp2_prs_hw_read(priv, pe); 2014 entry_pmap = mvpp2_prs_tcam_port_map_get(pe); 2015 2016 if (mvpp2_prs_mac_range_equals(pe, da, mask) && 2017 entry_pmap == pmap) 2018 return pe; 2019 } 2020 kfree(pe); 2021 2022 return NULL; 2023 } 2024 2025 /* Update parser's mac da entry */ 2026 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port, 2027 const u8 *da, bool add) 2028 { 2029 struct mvpp2_prs_entry *pe; 2030 unsigned int pmap, len, ri; 2031 unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 2032 int tid; 2033 2034 /* Scan TCAM and see if entry with this <MAC DA, port> already exist */ 2035 pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask, 2036 MVPP2_PRS_UDF_MAC_DEF); 2037 2038 /* No such entry */ 2039 if (!pe) { 2040 if (!add) 2041 return 0; 2042 2043 /* Create new TCAM entry */ 2044 /* Find first range mac entry*/ 2045 for (tid = MVPP2_PE_FIRST_FREE_TID; 2046 tid <= MVPP2_PE_LAST_FREE_TID; tid++) 2047 if (priv->prs_shadow[tid].valid && 2048 (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) && 2049 (priv->prs_shadow[tid].udf == 2050 MVPP2_PRS_UDF_MAC_RANGE)) 2051 break; 2052 2053 /* Go through the all entries from first to last */ 2054 tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, 2055 tid - 1); 2056 if (tid < 0) 2057 return tid; 2058 2059 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2060 if (!pe) 2061 return -1; 2062 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 2063 pe->index = tid; 2064 2065 /* Mask all ports */ 2066 mvpp2_prs_tcam_port_map_set(pe, 0); 2067 } 2068 2069 /* Update port mask */ 2070 mvpp2_prs_tcam_port_set(pe, port, add); 2071 2072 /* Invalidate the entry if no ports are left enabled */ 2073 pmap = mvpp2_prs_tcam_port_map_get(pe); 2074 if (pmap == 0) { 2075 if (add) { 2076 kfree(pe); 2077 return -1; 2078 } 2079 mvpp2_prs_hw_inv(priv, pe->index); 2080 priv->prs_shadow[pe->index].valid = false; 2081 kfree(pe); 2082 return 0; 2083 } 2084 2085 /* Continue - set next lookup */ 2086 mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA); 2087 2088 /* Set match on DA */ 2089 len = ETH_ALEN; 2090 while (len--) 2091 mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff); 2092 2093 /* Set result info bits */ 2094 ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK; 2095 2096 mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | 2097 MVPP2_PRS_RI_MAC_ME_MASK); 2098 mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK | 2099 MVPP2_PRS_RI_MAC_ME_MASK); 2100 2101 /* Shift to ethertype */ 2102 mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN, 2103 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2104 2105 /* Update shadow table and hw entry */ 2106 priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF; 2107 mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC); 2108 mvpp2_prs_hw_write(priv, pe); 2109 2110 kfree(pe); 2111 2112 return 0; 2113 } 2114 2115 static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da) 2116 { 2117 int err; 2118 2119 /* Remove old parser entry */ 2120 err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr, 2121 false); 2122 if (err) 2123 return err; 2124 2125 /* Add new parser entry */ 2126 err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true); 2127 if (err) 2128 return err; 2129 2130 /* Set addr in the device */ 2131 memcpy(port->dev_addr, da, ETH_ALEN); 2132 2133 return 0; 2134 } 2135 2136 /* Set prs flow for the port */ 2137 static int mvpp2_prs_def_flow(struct mvpp2_port *port) 2138 { 2139 struct mvpp2_prs_entry *pe; 2140 int tid; 2141 2142 pe = mvpp2_prs_flow_find(port->priv, port->id); 2143 2144 /* Such entry not exist */ 2145 if (!pe) { 2146 /* Go through the all entires from last to first */ 2147 tid = mvpp2_prs_tcam_first_free(port->priv, 2148 MVPP2_PE_LAST_FREE_TID, 2149 MVPP2_PE_FIRST_FREE_TID); 2150 if (tid < 0) 2151 return tid; 2152 2153 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 2154 if (!pe) 2155 return -ENOMEM; 2156 2157 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); 2158 pe->index = tid; 2159 2160 /* Set flow ID*/ 2161 mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK); 2162 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); 2163 2164 /* Update shadow table */ 2165 mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS); 2166 } 2167 2168 mvpp2_prs_tcam_port_map_set(pe, (1 << port->id)); 2169 mvpp2_prs_hw_write(port->priv, pe); 2170 kfree(pe); 2171 2172 return 0; 2173 } 2174 2175 /* Classifier configuration routines */ 2176 2177 /* Update classification flow table registers */ 2178 static void mvpp2_cls_flow_write(struct mvpp2 *priv, 2179 struct mvpp2_cls_flow_entry *fe) 2180 { 2181 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); 2182 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); 2183 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); 2184 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); 2185 } 2186 2187 /* Update classification lookup table register */ 2188 static void mvpp2_cls_lookup_write(struct mvpp2 *priv, 2189 struct mvpp2_cls_lookup_entry *le) 2190 { 2191 u32 val; 2192 2193 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid; 2194 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); 2195 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); 2196 } 2197 2198 /* Classifier default initialization */ 2199 static void mvpp2_cls_init(struct mvpp2 *priv) 2200 { 2201 struct mvpp2_cls_lookup_entry le; 2202 struct mvpp2_cls_flow_entry fe; 2203 int index; 2204 2205 /* Enable classifier */ 2206 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); 2207 2208 /* Clear classifier flow table */ 2209 memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS); 2210 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { 2211 fe.index = index; 2212 mvpp2_cls_flow_write(priv, &fe); 2213 } 2214 2215 /* Clear classifier lookup table */ 2216 le.data = 0; 2217 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) { 2218 le.lkpid = index; 2219 le.way = 0; 2220 mvpp2_cls_lookup_write(priv, &le); 2221 2222 le.way = 1; 2223 mvpp2_cls_lookup_write(priv, &le); 2224 } 2225 } 2226 2227 static void mvpp2_cls_port_config(struct mvpp2_port *port) 2228 { 2229 struct mvpp2_cls_lookup_entry le; 2230 u32 val; 2231 2232 /* Set way for the port */ 2233 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG); 2234 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id); 2235 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val); 2236 2237 /* Pick the entry to be accessed in lookup ID decoding table 2238 * according to the way and lkpid. 2239 */ 2240 le.lkpid = port->id; 2241 le.way = 0; 2242 le.data = 0; 2243 2244 /* Set initial CPU queue for receiving packets */ 2245 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK; 2246 le.data |= port->first_rxq; 2247 2248 /* Disable classification engines */ 2249 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; 2250 2251 /* Update lookup ID table entry */ 2252 mvpp2_cls_lookup_write(port->priv, &le); 2253 } 2254 2255 /* Set CPU queue number for oversize packets */ 2256 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) 2257 { 2258 u32 val; 2259 2260 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id), 2261 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK); 2262 2263 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id), 2264 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); 2265 2266 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); 2267 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); 2268 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); 2269 } 2270 2271 /* Buffer Manager configuration routines */ 2272 2273 /* Create pool */ 2274 static int mvpp2_bm_pool_create(struct udevice *dev, 2275 struct mvpp2 *priv, 2276 struct mvpp2_bm_pool *bm_pool, int size) 2277 { 2278 u32 val; 2279 2280 bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id]; 2281 bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id]; 2282 if (!bm_pool->virt_addr) 2283 return -ENOMEM; 2284 2285 if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr, 2286 MVPP2_BM_POOL_PTR_ALIGN)) { 2287 dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", 2288 bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN); 2289 return -ENOMEM; 2290 } 2291 2292 mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), 2293 bm_pool->dma_addr); 2294 mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size); 2295 2296 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); 2297 val |= MVPP2_BM_START_MASK; 2298 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); 2299 2300 bm_pool->type = MVPP2_BM_FREE; 2301 bm_pool->size = size; 2302 bm_pool->pkt_size = 0; 2303 bm_pool->buf_num = 0; 2304 2305 return 0; 2306 } 2307 2308 /* Set pool buffer size */ 2309 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, 2310 struct mvpp2_bm_pool *bm_pool, 2311 int buf_size) 2312 { 2313 u32 val; 2314 2315 bm_pool->buf_size = buf_size; 2316 2317 val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET); 2318 mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val); 2319 } 2320 2321 /* Free all buffers from the pool */ 2322 static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv, 2323 struct mvpp2_bm_pool *bm_pool) 2324 { 2325 bm_pool->buf_num = 0; 2326 } 2327 2328 /* Cleanup pool */ 2329 static int mvpp2_bm_pool_destroy(struct udevice *dev, 2330 struct mvpp2 *priv, 2331 struct mvpp2_bm_pool *bm_pool) 2332 { 2333 u32 val; 2334 2335 mvpp2_bm_bufs_free(dev, priv, bm_pool); 2336 if (bm_pool->buf_num) { 2337 dev_err(dev, "cannot free all buffers in pool %d\n", bm_pool->id); 2338 return 0; 2339 } 2340 2341 val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); 2342 val |= MVPP2_BM_STOP_MASK; 2343 mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); 2344 2345 return 0; 2346 } 2347 2348 static int mvpp2_bm_pools_init(struct udevice *dev, 2349 struct mvpp2 *priv) 2350 { 2351 int i, err, size; 2352 struct mvpp2_bm_pool *bm_pool; 2353 2354 /* Create all pools with maximum size */ 2355 size = MVPP2_BM_POOL_SIZE_MAX; 2356 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { 2357 bm_pool = &priv->bm_pools[i]; 2358 bm_pool->id = i; 2359 err = mvpp2_bm_pool_create(dev, priv, bm_pool, size); 2360 if (err) 2361 goto err_unroll_pools; 2362 mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0); 2363 } 2364 return 0; 2365 2366 err_unroll_pools: 2367 dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size); 2368 for (i = i - 1; i >= 0; i--) 2369 mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]); 2370 return err; 2371 } 2372 2373 static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv) 2374 { 2375 int i, err; 2376 2377 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { 2378 /* Mask BM all interrupts */ 2379 mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0); 2380 /* Clear BM cause register */ 2381 mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0); 2382 } 2383 2384 /* Allocate and initialize BM pools */ 2385 priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM, 2386 sizeof(struct mvpp2_bm_pool), GFP_KERNEL); 2387 if (!priv->bm_pools) 2388 return -ENOMEM; 2389 2390 err = mvpp2_bm_pools_init(dev, priv); 2391 if (err < 0) 2392 return err; 2393 return 0; 2394 } 2395 2396 /* Attach long pool to rxq */ 2397 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port, 2398 int lrxq, int long_pool) 2399 { 2400 u32 val; 2401 int prxq; 2402 2403 /* Get queue physical ID */ 2404 prxq = port->rxqs[lrxq]->id; 2405 2406 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 2407 val &= ~MVPP2_RXQ_POOL_LONG_MASK; 2408 val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & 2409 MVPP2_RXQ_POOL_LONG_MASK); 2410 2411 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 2412 } 2413 2414 /* Set pool number in a BM cookie */ 2415 static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool) 2416 { 2417 u32 bm; 2418 2419 bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS); 2420 bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS); 2421 2422 return bm; 2423 } 2424 2425 /* Get pool number from a BM cookie */ 2426 static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie) 2427 { 2428 return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF; 2429 } 2430 2431 /* Release buffer to BM */ 2432 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, 2433 dma_addr_t buf_dma_addr, 2434 unsigned long buf_phys_addr) 2435 { 2436 /* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply 2437 * returned in the "cookie" field of the RX 2438 * descriptor. Instead of storing the virtual address, we 2439 * store the physical address 2440 */ 2441 mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); 2442 mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); 2443 } 2444 2445 /* Refill BM pool */ 2446 static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, 2447 dma_addr_t dma_addr, 2448 phys_addr_t phys_addr) 2449 { 2450 int pool = mvpp2_bm_cookie_pool_get(bm); 2451 2452 mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); 2453 } 2454 2455 /* Allocate buffers for the pool */ 2456 static int mvpp2_bm_bufs_add(struct mvpp2_port *port, 2457 struct mvpp2_bm_pool *bm_pool, int buf_num) 2458 { 2459 int i; 2460 2461 if (buf_num < 0 || 2462 (buf_num + bm_pool->buf_num > bm_pool->size)) { 2463 netdev_err(port->dev, 2464 "cannot allocate %d buffers for pool %d\n", 2465 buf_num, bm_pool->id); 2466 return 0; 2467 } 2468 2469 for (i = 0; i < buf_num; i++) { 2470 mvpp2_bm_pool_put(port, bm_pool->id, 2471 (dma_addr_t)buffer_loc.rx_buffer[i], 2472 (unsigned long)buffer_loc.rx_buffer[i]); 2473 2474 } 2475 2476 /* Update BM driver with number of buffers added to pool */ 2477 bm_pool->buf_num += i; 2478 bm_pool->in_use_thresh = bm_pool->buf_num / 4; 2479 2480 return i; 2481 } 2482 2483 /* Notify the driver that BM pool is being used as specific type and return the 2484 * pool pointer on success 2485 */ 2486 static struct mvpp2_bm_pool * 2487 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, 2488 int pkt_size) 2489 { 2490 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; 2491 int num; 2492 2493 if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) { 2494 netdev_err(port->dev, "mixing pool types is forbidden\n"); 2495 return NULL; 2496 } 2497 2498 if (new_pool->type == MVPP2_BM_FREE) 2499 new_pool->type = type; 2500 2501 /* Allocate buffers in case BM pool is used as long pool, but packet 2502 * size doesn't match MTU or BM pool hasn't being used yet 2503 */ 2504 if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) || 2505 (new_pool->pkt_size == 0)) { 2506 int pkts_num; 2507 2508 /* Set default buffer number or free all the buffers in case 2509 * the pool is not empty 2510 */ 2511 pkts_num = new_pool->buf_num; 2512 if (pkts_num == 0) 2513 pkts_num = type == MVPP2_BM_SWF_LONG ? 2514 MVPP2_BM_LONG_BUF_NUM : 2515 MVPP2_BM_SHORT_BUF_NUM; 2516 else 2517 mvpp2_bm_bufs_free(NULL, 2518 port->priv, new_pool); 2519 2520 new_pool->pkt_size = pkt_size; 2521 2522 /* Allocate buffers for this pool */ 2523 num = mvpp2_bm_bufs_add(port, new_pool, pkts_num); 2524 if (num != pkts_num) { 2525 dev_err(dev, "pool %d: %d of %d allocated\n", 2526 new_pool->id, num, pkts_num); 2527 return NULL; 2528 } 2529 } 2530 2531 mvpp2_bm_pool_bufsize_set(port->priv, new_pool, 2532 MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); 2533 2534 return new_pool; 2535 } 2536 2537 /* Initialize pools for swf */ 2538 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) 2539 { 2540 int rxq; 2541 2542 if (!port->pool_long) { 2543 port->pool_long = 2544 mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id), 2545 MVPP2_BM_SWF_LONG, 2546 port->pkt_size); 2547 if (!port->pool_long) 2548 return -ENOMEM; 2549 2550 port->pool_long->port_map |= (1 << port->id); 2551 2552 for (rxq = 0; rxq < rxq_number; rxq++) 2553 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); 2554 } 2555 2556 return 0; 2557 } 2558 2559 /* Port configuration routines */ 2560 2561 static void mvpp2_port_mii_set(struct mvpp2_port *port) 2562 { 2563 u32 val; 2564 2565 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); 2566 2567 switch (port->phy_interface) { 2568 case PHY_INTERFACE_MODE_SGMII: 2569 val |= MVPP2_GMAC_INBAND_AN_MASK; 2570 break; 2571 case PHY_INTERFACE_MODE_RGMII: 2572 val |= MVPP2_GMAC_PORT_RGMII_MASK; 2573 default: 2574 val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; 2575 } 2576 2577 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 2578 } 2579 2580 static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port) 2581 { 2582 u32 val; 2583 2584 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 2585 val |= MVPP2_GMAC_FC_ADV_EN; 2586 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 2587 } 2588 2589 static void mvpp2_port_enable(struct mvpp2_port *port) 2590 { 2591 u32 val; 2592 2593 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 2594 val |= MVPP2_GMAC_PORT_EN_MASK; 2595 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK; 2596 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); 2597 } 2598 2599 static void mvpp2_port_disable(struct mvpp2_port *port) 2600 { 2601 u32 val; 2602 2603 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 2604 val &= ~(MVPP2_GMAC_PORT_EN_MASK); 2605 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); 2606 } 2607 2608 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */ 2609 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port) 2610 { 2611 u32 val; 2612 2613 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) & 2614 ~MVPP2_GMAC_PERIODIC_XON_EN_MASK; 2615 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); 2616 } 2617 2618 /* Configure loopback port */ 2619 static void mvpp2_port_loopback_set(struct mvpp2_port *port) 2620 { 2621 u32 val; 2622 2623 val = readl(port->base + MVPP2_GMAC_CTRL_1_REG); 2624 2625 if (port->speed == 1000) 2626 val |= MVPP2_GMAC_GMII_LB_EN_MASK; 2627 else 2628 val &= ~MVPP2_GMAC_GMII_LB_EN_MASK; 2629 2630 if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) 2631 val |= MVPP2_GMAC_PCS_LB_EN_MASK; 2632 else 2633 val &= ~MVPP2_GMAC_PCS_LB_EN_MASK; 2634 2635 writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); 2636 } 2637 2638 static void mvpp2_port_reset(struct mvpp2_port *port) 2639 { 2640 u32 val; 2641 2642 val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) & 2643 ~MVPP2_GMAC_PORT_RESET_MASK; 2644 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); 2645 2646 while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) & 2647 MVPP2_GMAC_PORT_RESET_MASK) 2648 continue; 2649 } 2650 2651 /* Change maximum receive size of the port */ 2652 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) 2653 { 2654 u32 val; 2655 2656 val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); 2657 val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; 2658 val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) << 2659 MVPP2_GMAC_MAX_RX_SIZE_OFFS); 2660 writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); 2661 } 2662 2663 /* Set defaults to the MVPP2 port */ 2664 static void mvpp2_defaults_set(struct mvpp2_port *port) 2665 { 2666 int tx_port_num, val, queue, ptxq, lrxq; 2667 2668 /* Configure port to loopback if needed */ 2669 if (port->flags & MVPP2_F_LOOPBACK) 2670 mvpp2_port_loopback_set(port); 2671 2672 /* Update TX FIFO MIN Threshold */ 2673 val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 2674 val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; 2675 /* Min. TX threshold must be less than minimal packet length */ 2676 val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); 2677 writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG); 2678 2679 /* Disable Legacy WRR, Disable EJP, Release from reset */ 2680 tx_port_num = mvpp2_egress_port(port); 2681 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, 2682 tx_port_num); 2683 mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); 2684 2685 /* Close bandwidth for all queues */ 2686 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { 2687 ptxq = mvpp2_txq_phys(port->id, queue); 2688 mvpp2_write(port->priv, 2689 MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0); 2690 } 2691 2692 /* Set refill period to 1 usec, refill tokens 2693 * and bucket size to maximum 2694 */ 2695 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8); 2696 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); 2697 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; 2698 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); 2699 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; 2700 mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); 2701 val = MVPP2_TXP_TOKEN_SIZE_MAX; 2702 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); 2703 2704 /* Set MaximumLowLatencyPacketSize value to 256 */ 2705 mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), 2706 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | 2707 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); 2708 2709 /* Enable Rx cache snoop */ 2710 for (lrxq = 0; lrxq < rxq_number; lrxq++) { 2711 queue = port->rxqs[lrxq]->id; 2712 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 2713 val |= MVPP2_SNOOP_PKT_SIZE_MASK | 2714 MVPP2_SNOOP_BUF_HDR_MASK; 2715 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 2716 } 2717 } 2718 2719 /* Enable/disable receiving packets */ 2720 static void mvpp2_ingress_enable(struct mvpp2_port *port) 2721 { 2722 u32 val; 2723 int lrxq, queue; 2724 2725 for (lrxq = 0; lrxq < rxq_number; lrxq++) { 2726 queue = port->rxqs[lrxq]->id; 2727 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 2728 val &= ~MVPP2_RXQ_DISABLE_MASK; 2729 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 2730 } 2731 } 2732 2733 static void mvpp2_ingress_disable(struct mvpp2_port *port) 2734 { 2735 u32 val; 2736 int lrxq, queue; 2737 2738 for (lrxq = 0; lrxq < rxq_number; lrxq++) { 2739 queue = port->rxqs[lrxq]->id; 2740 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); 2741 val |= MVPP2_RXQ_DISABLE_MASK; 2742 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); 2743 } 2744 } 2745 2746 /* Enable transmit via physical egress queue 2747 * - HW starts take descriptors from DRAM 2748 */ 2749 static void mvpp2_egress_enable(struct mvpp2_port *port) 2750 { 2751 u32 qmap; 2752 int queue; 2753 int tx_port_num = mvpp2_egress_port(port); 2754 2755 /* Enable all initialized TXs. */ 2756 qmap = 0; 2757 for (queue = 0; queue < txq_number; queue++) { 2758 struct mvpp2_tx_queue *txq = port->txqs[queue]; 2759 2760 if (txq->descs != NULL) 2761 qmap |= (1 << queue); 2762 } 2763 2764 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 2765 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); 2766 } 2767 2768 /* Disable transmit via physical egress queue 2769 * - HW doesn't take descriptors from DRAM 2770 */ 2771 static void mvpp2_egress_disable(struct mvpp2_port *port) 2772 { 2773 u32 reg_data; 2774 int delay; 2775 int tx_port_num = mvpp2_egress_port(port); 2776 2777 /* Issue stop command for active channels only */ 2778 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 2779 reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & 2780 MVPP2_TXP_SCHED_ENQ_MASK; 2781 if (reg_data != 0) 2782 mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, 2783 (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); 2784 2785 /* Wait for all Tx activity to terminate. */ 2786 delay = 0; 2787 do { 2788 if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { 2789 netdev_warn(port->dev, 2790 "Tx stop timed out, status=0x%08x\n", 2791 reg_data); 2792 break; 2793 } 2794 mdelay(1); 2795 delay++; 2796 2797 /* Check port TX Command register that all 2798 * Tx queues are stopped 2799 */ 2800 reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); 2801 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); 2802 } 2803 2804 /* Rx descriptors helper methods */ 2805 2806 /* Get number of Rx descriptors occupied by received packets */ 2807 static inline int 2808 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id) 2809 { 2810 u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id)); 2811 2812 return val & MVPP2_RXQ_OCCUPIED_MASK; 2813 } 2814 2815 /* Update Rx queue status with the number of occupied and available 2816 * Rx descriptor slots. 2817 */ 2818 static inline void 2819 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id, 2820 int used_count, int free_count) 2821 { 2822 /* Decrement the number of used descriptors and increment count 2823 * increment the number of free descriptors. 2824 */ 2825 u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET); 2826 2827 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val); 2828 } 2829 2830 /* Get pointer to next RX descriptor to be processed by SW */ 2831 static inline struct mvpp2_rx_desc * 2832 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq) 2833 { 2834 int rx_desc = rxq->next_desc_to_proc; 2835 2836 rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc); 2837 prefetch(rxq->descs + rxq->next_desc_to_proc); 2838 return rxq->descs + rx_desc; 2839 } 2840 2841 /* Set rx queue offset */ 2842 static void mvpp2_rxq_offset_set(struct mvpp2_port *port, 2843 int prxq, int offset) 2844 { 2845 u32 val; 2846 2847 /* Convert offset from bytes to units of 32 bytes */ 2848 offset = offset >> 5; 2849 2850 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); 2851 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; 2852 2853 /* Offset is in */ 2854 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & 2855 MVPP2_RXQ_PACKET_OFFSET_MASK); 2856 2857 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); 2858 } 2859 2860 /* Obtain BM cookie information from descriptor */ 2861 static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port, 2862 struct mvpp2_rx_desc *rx_desc) 2863 { 2864 int cpu = smp_processor_id(); 2865 int pool; 2866 2867 pool = (mvpp2_rxdesc_status_get(port, rx_desc) & 2868 MVPP2_RXD_BM_POOL_ID_MASK) >> 2869 MVPP2_RXD_BM_POOL_ID_OFFS; 2870 2871 return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | 2872 ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS); 2873 } 2874 2875 /* Tx descriptors helper methods */ 2876 2877 /* Get number of Tx descriptors waiting to be transmitted by HW */ 2878 static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port, 2879 struct mvpp2_tx_queue *txq) 2880 { 2881 u32 val; 2882 2883 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 2884 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG); 2885 2886 return val & MVPP2_TXQ_PENDING_MASK; 2887 } 2888 2889 /* Get pointer to next Tx descriptor to be processed (send) by HW */ 2890 static struct mvpp2_tx_desc * 2891 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) 2892 { 2893 int tx_desc = txq->next_desc_to_proc; 2894 2895 txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); 2896 return txq->descs + tx_desc; 2897 } 2898 2899 /* Update HW with number of aggregated Tx descriptors to be sent */ 2900 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) 2901 { 2902 /* aggregated access - relevant TXQ number is written in TX desc */ 2903 mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending); 2904 } 2905 2906 /* Get number of sent descriptors and decrement counter. 2907 * The number of sent descriptors is returned. 2908 * Per-CPU access 2909 */ 2910 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port, 2911 struct mvpp2_tx_queue *txq) 2912 { 2913 u32 val; 2914 2915 /* Reading status reg resets transmitted descriptor counter */ 2916 val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id)); 2917 2918 return (val & MVPP2_TRANSMITTED_COUNT_MASK) >> 2919 MVPP2_TRANSMITTED_COUNT_OFFSET; 2920 } 2921 2922 static void mvpp2_txq_sent_counter_clear(void *arg) 2923 { 2924 struct mvpp2_port *port = arg; 2925 int queue; 2926 2927 for (queue = 0; queue < txq_number; queue++) { 2928 int id = port->txqs[queue]->id; 2929 2930 mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id)); 2931 } 2932 } 2933 2934 /* Set max sizes for Tx queues */ 2935 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) 2936 { 2937 u32 val, size, mtu; 2938 int txq, tx_port_num; 2939 2940 mtu = port->pkt_size * 8; 2941 if (mtu > MVPP2_TXP_MTU_MAX) 2942 mtu = MVPP2_TXP_MTU_MAX; 2943 2944 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ 2945 mtu = 3 * mtu; 2946 2947 /* Indirect access to registers */ 2948 tx_port_num = mvpp2_egress_port(port); 2949 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 2950 2951 /* Set MTU */ 2952 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); 2953 val &= ~MVPP2_TXP_MTU_MAX; 2954 val |= mtu; 2955 mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); 2956 2957 /* TXP token size and all TXQs token size must be larger that MTU */ 2958 val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); 2959 size = val & MVPP2_TXP_TOKEN_SIZE_MAX; 2960 if (size < mtu) { 2961 size = mtu; 2962 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; 2963 val |= size; 2964 mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); 2965 } 2966 2967 for (txq = 0; txq < txq_number; txq++) { 2968 val = mvpp2_read(port->priv, 2969 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); 2970 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; 2971 2972 if (size < mtu) { 2973 size = mtu; 2974 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; 2975 val |= size; 2976 mvpp2_write(port->priv, 2977 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), 2978 val); 2979 } 2980 } 2981 } 2982 2983 /* Free Tx queue skbuffs */ 2984 static void mvpp2_txq_bufs_free(struct mvpp2_port *port, 2985 struct mvpp2_tx_queue *txq, 2986 struct mvpp2_txq_pcpu *txq_pcpu, int num) 2987 { 2988 int i; 2989 2990 for (i = 0; i < num; i++) 2991 mvpp2_txq_inc_get(txq_pcpu); 2992 } 2993 2994 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port, 2995 u32 cause) 2996 { 2997 int queue = fls(cause) - 1; 2998 2999 return port->rxqs[queue]; 3000 } 3001 3002 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, 3003 u32 cause) 3004 { 3005 int queue = fls(cause) - 1; 3006 3007 return port->txqs[queue]; 3008 } 3009 3010 /* Rx/Tx queue initialization/cleanup methods */ 3011 3012 /* Allocate and initialize descriptors for aggr TXQ */ 3013 static int mvpp2_aggr_txq_init(struct udevice *dev, 3014 struct mvpp2_tx_queue *aggr_txq, 3015 int desc_num, int cpu, 3016 struct mvpp2 *priv) 3017 { 3018 /* Allocate memory for TX descriptors */ 3019 aggr_txq->descs = buffer_loc.aggr_tx_descs; 3020 aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs; 3021 if (!aggr_txq->descs) 3022 return -ENOMEM; 3023 3024 /* Make sure descriptor address is cache line size aligned */ 3025 BUG_ON(aggr_txq->descs != 3026 PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); 3027 3028 aggr_txq->last_desc = aggr_txq->size - 1; 3029 3030 /* Aggr TXQ no reset WA */ 3031 aggr_txq->next_desc_to_proc = mvpp2_read(priv, 3032 MVPP2_AGGR_TXQ_INDEX_REG(cpu)); 3033 3034 /* Set Tx descriptors queue starting address */ 3035 /* indirect access */ 3036 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), 3037 aggr_txq->descs_dma); 3038 mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num); 3039 3040 return 0; 3041 } 3042 3043 /* Create a specified Rx queue */ 3044 static int mvpp2_rxq_init(struct mvpp2_port *port, 3045 struct mvpp2_rx_queue *rxq) 3046 3047 { 3048 rxq->size = port->rx_ring_size; 3049 3050 /* Allocate memory for RX descriptors */ 3051 rxq->descs = buffer_loc.rx_descs; 3052 rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs; 3053 if (!rxq->descs) 3054 return -ENOMEM; 3055 3056 BUG_ON(rxq->descs != 3057 PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); 3058 3059 rxq->last_desc = rxq->size - 1; 3060 3061 /* Zero occupied and non-occupied counters - direct access */ 3062 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 3063 3064 /* Set Rx descriptors queue starting address - indirect access */ 3065 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); 3066 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_dma); 3067 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); 3068 mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0); 3069 3070 /* Set Offset */ 3071 mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); 3072 3073 /* Add number of descriptors ready for receiving packets */ 3074 mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); 3075 3076 return 0; 3077 } 3078 3079 /* Push packets received by the RXQ to BM pool */ 3080 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, 3081 struct mvpp2_rx_queue *rxq) 3082 { 3083 int rx_received, i; 3084 3085 rx_received = mvpp2_rxq_received(port, rxq->id); 3086 if (!rx_received) 3087 return; 3088 3089 for (i = 0; i < rx_received; i++) { 3090 struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); 3091 u32 bm = mvpp2_bm_cookie_build(port, rx_desc); 3092 3093 mvpp2_pool_refill(port, bm, 3094 mvpp2_rxdesc_dma_addr_get(port, rx_desc), 3095 mvpp2_rxdesc_cookie_get(port, rx_desc)); 3096 } 3097 mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); 3098 } 3099 3100 /* Cleanup Rx queue */ 3101 static void mvpp2_rxq_deinit(struct mvpp2_port *port, 3102 struct mvpp2_rx_queue *rxq) 3103 { 3104 mvpp2_rxq_drop_pkts(port, rxq); 3105 3106 rxq->descs = NULL; 3107 rxq->last_desc = 0; 3108 rxq->next_desc_to_proc = 0; 3109 rxq->descs_dma = 0; 3110 3111 /* Clear Rx descriptors queue starting address and size; 3112 * free descriptor number 3113 */ 3114 mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); 3115 mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); 3116 mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0); 3117 mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0); 3118 } 3119 3120 /* Create and initialize a Tx queue */ 3121 static int mvpp2_txq_init(struct mvpp2_port *port, 3122 struct mvpp2_tx_queue *txq) 3123 { 3124 u32 val; 3125 int cpu, desc, desc_per_txq, tx_port_num; 3126 struct mvpp2_txq_pcpu *txq_pcpu; 3127 3128 txq->size = port->tx_ring_size; 3129 3130 /* Allocate memory for Tx descriptors */ 3131 txq->descs = buffer_loc.tx_descs; 3132 txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs; 3133 if (!txq->descs) 3134 return -ENOMEM; 3135 3136 /* Make sure descriptor address is cache line size aligned */ 3137 BUG_ON(txq->descs != 3138 PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE)); 3139 3140 txq->last_desc = txq->size - 1; 3141 3142 /* Set Tx descriptors queue starting address - indirect access */ 3143 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 3144 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma); 3145 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size & 3146 MVPP2_TXQ_DESC_SIZE_MASK); 3147 mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0); 3148 mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG, 3149 txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); 3150 val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG); 3151 val &= ~MVPP2_TXQ_PENDING_MASK; 3152 mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val); 3153 3154 /* Calculate base address in prefetch buffer. We reserve 16 descriptors 3155 * for each existing TXQ. 3156 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT 3157 * GBE ports assumed to be continious from 0 to MVPP2_MAX_PORTS 3158 */ 3159 desc_per_txq = 16; 3160 desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + 3161 (txq->log_id * desc_per_txq); 3162 3163 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, 3164 MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | 3165 MVPP2_PREF_BUF_THRESH(desc_per_txq/2)); 3166 3167 /* WRR / EJP configuration - indirect access */ 3168 tx_port_num = mvpp2_egress_port(port); 3169 mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); 3170 3171 val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); 3172 val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; 3173 val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); 3174 val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; 3175 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); 3176 3177 val = MVPP2_TXQ_TOKEN_SIZE_MAX; 3178 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), 3179 val); 3180 3181 for_each_present_cpu(cpu) { 3182 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 3183 txq_pcpu->size = txq->size; 3184 } 3185 3186 return 0; 3187 } 3188 3189 /* Free allocated TXQ resources */ 3190 static void mvpp2_txq_deinit(struct mvpp2_port *port, 3191 struct mvpp2_tx_queue *txq) 3192 { 3193 txq->descs = NULL; 3194 txq->last_desc = 0; 3195 txq->next_desc_to_proc = 0; 3196 txq->descs_dma = 0; 3197 3198 /* Set minimum bandwidth for disabled TXQs */ 3199 mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); 3200 3201 /* Set Tx descriptors queue starting address and size */ 3202 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 3203 mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0); 3204 mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0); 3205 } 3206 3207 /* Cleanup Tx ports */ 3208 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) 3209 { 3210 struct mvpp2_txq_pcpu *txq_pcpu; 3211 int delay, pending, cpu; 3212 u32 val; 3213 3214 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 3215 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG); 3216 val |= MVPP2_TXQ_DRAIN_EN_MASK; 3217 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); 3218 3219 /* The napi queue has been stopped so wait for all packets 3220 * to be transmitted. 3221 */ 3222 delay = 0; 3223 do { 3224 if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { 3225 netdev_warn(port->dev, 3226 "port %d: cleaning queue %d timed out\n", 3227 port->id, txq->log_id); 3228 break; 3229 } 3230 mdelay(1); 3231 delay++; 3232 3233 pending = mvpp2_txq_pend_desc_num_get(port, txq); 3234 } while (pending); 3235 3236 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 3237 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); 3238 3239 for_each_present_cpu(cpu) { 3240 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 3241 3242 /* Release all packets */ 3243 mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count); 3244 3245 /* Reset queue */ 3246 txq_pcpu->count = 0; 3247 txq_pcpu->txq_put_index = 0; 3248 txq_pcpu->txq_get_index = 0; 3249 } 3250 } 3251 3252 /* Cleanup all Tx queues */ 3253 static void mvpp2_cleanup_txqs(struct mvpp2_port *port) 3254 { 3255 struct mvpp2_tx_queue *txq; 3256 int queue; 3257 u32 val; 3258 3259 val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG); 3260 3261 /* Reset Tx ports and delete Tx queues */ 3262 val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); 3263 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 3264 3265 for (queue = 0; queue < txq_number; queue++) { 3266 txq = port->txqs[queue]; 3267 mvpp2_txq_clean(port, txq); 3268 mvpp2_txq_deinit(port, txq); 3269 } 3270 3271 mvpp2_txq_sent_counter_clear(port); 3272 3273 val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id); 3274 mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); 3275 } 3276 3277 /* Cleanup all Rx queues */ 3278 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) 3279 { 3280 int queue; 3281 3282 for (queue = 0; queue < rxq_number; queue++) 3283 mvpp2_rxq_deinit(port, port->rxqs[queue]); 3284 } 3285 3286 /* Init all Rx queues for port */ 3287 static int mvpp2_setup_rxqs(struct mvpp2_port *port) 3288 { 3289 int queue, err; 3290 3291 for (queue = 0; queue < rxq_number; queue++) { 3292 err = mvpp2_rxq_init(port, port->rxqs[queue]); 3293 if (err) 3294 goto err_cleanup; 3295 } 3296 return 0; 3297 3298 err_cleanup: 3299 mvpp2_cleanup_rxqs(port); 3300 return err; 3301 } 3302 3303 /* Init all tx queues for port */ 3304 static int mvpp2_setup_txqs(struct mvpp2_port *port) 3305 { 3306 struct mvpp2_tx_queue *txq; 3307 int queue, err; 3308 3309 for (queue = 0; queue < txq_number; queue++) { 3310 txq = port->txqs[queue]; 3311 err = mvpp2_txq_init(port, txq); 3312 if (err) 3313 goto err_cleanup; 3314 } 3315 3316 mvpp2_txq_sent_counter_clear(port); 3317 return 0; 3318 3319 err_cleanup: 3320 mvpp2_cleanup_txqs(port); 3321 return err; 3322 } 3323 3324 /* Adjust link */ 3325 static void mvpp2_link_event(struct mvpp2_port *port) 3326 { 3327 struct phy_device *phydev = port->phy_dev; 3328 int status_change = 0; 3329 u32 val; 3330 3331 if (phydev->link) { 3332 if ((port->speed != phydev->speed) || 3333 (port->duplex != phydev->duplex)) { 3334 u32 val; 3335 3336 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 3337 val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | 3338 MVPP2_GMAC_CONFIG_GMII_SPEED | 3339 MVPP2_GMAC_CONFIG_FULL_DUPLEX | 3340 MVPP2_GMAC_AN_SPEED_EN | 3341 MVPP2_GMAC_AN_DUPLEX_EN); 3342 3343 if (phydev->duplex) 3344 val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX; 3345 3346 if (phydev->speed == SPEED_1000) 3347 val |= MVPP2_GMAC_CONFIG_GMII_SPEED; 3348 else if (phydev->speed == SPEED_100) 3349 val |= MVPP2_GMAC_CONFIG_MII_SPEED; 3350 3351 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 3352 3353 port->duplex = phydev->duplex; 3354 port->speed = phydev->speed; 3355 } 3356 } 3357 3358 if (phydev->link != port->link) { 3359 if (!phydev->link) { 3360 port->duplex = -1; 3361 port->speed = 0; 3362 } 3363 3364 port->link = phydev->link; 3365 status_change = 1; 3366 } 3367 3368 if (status_change) { 3369 if (phydev->link) { 3370 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); 3371 val |= (MVPP2_GMAC_FORCE_LINK_PASS | 3372 MVPP2_GMAC_FORCE_LINK_DOWN); 3373 writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); 3374 mvpp2_egress_enable(port); 3375 mvpp2_ingress_enable(port); 3376 } else { 3377 mvpp2_ingress_disable(port); 3378 mvpp2_egress_disable(port); 3379 } 3380 } 3381 } 3382 3383 /* Main RX/TX processing routines */ 3384 3385 /* Display more error info */ 3386 static void mvpp2_rx_error(struct mvpp2_port *port, 3387 struct mvpp2_rx_desc *rx_desc) 3388 { 3389 u32 status = mvpp2_rxdesc_status_get(port, rx_desc); 3390 size_t sz = mvpp2_rxdesc_size_get(port, rx_desc); 3391 3392 switch (status & MVPP2_RXD_ERR_CODE_MASK) { 3393 case MVPP2_RXD_ERR_CRC: 3394 netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n", 3395 status, sz); 3396 break; 3397 case MVPP2_RXD_ERR_OVERRUN: 3398 netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n", 3399 status, sz); 3400 break; 3401 case MVPP2_RXD_ERR_RESOURCE: 3402 netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n", 3403 status, sz); 3404 break; 3405 } 3406 } 3407 3408 /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ 3409 static int mvpp2_rx_refill(struct mvpp2_port *port, 3410 struct mvpp2_bm_pool *bm_pool, 3411 u32 bm, dma_addr_t dma_addr) 3412 { 3413 mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr); 3414 return 0; 3415 } 3416 3417 /* Set hw internals when starting port */ 3418 static void mvpp2_start_dev(struct mvpp2_port *port) 3419 { 3420 mvpp2_gmac_max_rx_size_set(port); 3421 mvpp2_txp_max_tx_size_set(port); 3422 3423 mvpp2_port_enable(port); 3424 } 3425 3426 /* Set hw internals when stopping port */ 3427 static void mvpp2_stop_dev(struct mvpp2_port *port) 3428 { 3429 /* Stop new packets from arriving to RXQs */ 3430 mvpp2_ingress_disable(port); 3431 3432 mvpp2_egress_disable(port); 3433 mvpp2_port_disable(port); 3434 } 3435 3436 static int mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port) 3437 { 3438 struct phy_device *phy_dev; 3439 3440 if (!port->init || port->link == 0) { 3441 phy_dev = phy_connect(port->priv->bus, port->phyaddr, dev, 3442 port->phy_interface); 3443 port->phy_dev = phy_dev; 3444 if (!phy_dev) { 3445 netdev_err(port->dev, "cannot connect to phy\n"); 3446 return -ENODEV; 3447 } 3448 phy_dev->supported &= PHY_GBIT_FEATURES; 3449 phy_dev->advertising = phy_dev->supported; 3450 3451 port->phy_dev = phy_dev; 3452 port->link = 0; 3453 port->duplex = 0; 3454 port->speed = 0; 3455 3456 phy_config(phy_dev); 3457 phy_startup(phy_dev); 3458 if (!phy_dev->link) { 3459 printf("%s: No link\n", phy_dev->dev->name); 3460 return -1; 3461 } 3462 3463 port->init = 1; 3464 } else { 3465 mvpp2_egress_enable(port); 3466 mvpp2_ingress_enable(port); 3467 } 3468 3469 return 0; 3470 } 3471 3472 static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port) 3473 { 3474 unsigned char mac_bcast[ETH_ALEN] = { 3475 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 3476 int err; 3477 3478 err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true); 3479 if (err) { 3480 netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n"); 3481 return err; 3482 } 3483 err = mvpp2_prs_mac_da_accept(port->priv, port->id, 3484 port->dev_addr, true); 3485 if (err) { 3486 netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n"); 3487 return err; 3488 } 3489 err = mvpp2_prs_def_flow(port); 3490 if (err) { 3491 netdev_err(dev, "mvpp2_prs_def_flow failed\n"); 3492 return err; 3493 } 3494 3495 /* Allocate the Rx/Tx queues */ 3496 err = mvpp2_setup_rxqs(port); 3497 if (err) { 3498 netdev_err(port->dev, "cannot allocate Rx queues\n"); 3499 return err; 3500 } 3501 3502 err = mvpp2_setup_txqs(port); 3503 if (err) { 3504 netdev_err(port->dev, "cannot allocate Tx queues\n"); 3505 return err; 3506 } 3507 3508 err = mvpp2_phy_connect(dev, port); 3509 if (err < 0) 3510 return err; 3511 3512 mvpp2_link_event(port); 3513 3514 mvpp2_start_dev(port); 3515 3516 return 0; 3517 } 3518 3519 /* No Device ops here in U-Boot */ 3520 3521 /* Driver initialization */ 3522 3523 static void mvpp2_port_power_up(struct mvpp2_port *port) 3524 { 3525 mvpp2_port_mii_set(port); 3526 mvpp2_port_periodic_xon_disable(port); 3527 mvpp2_port_fc_adv_enable(port); 3528 mvpp2_port_reset(port); 3529 } 3530 3531 /* Initialize port HW */ 3532 static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port) 3533 { 3534 struct mvpp2 *priv = port->priv; 3535 struct mvpp2_txq_pcpu *txq_pcpu; 3536 int queue, cpu, err; 3537 3538 if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM) 3539 return -EINVAL; 3540 3541 /* Disable port */ 3542 mvpp2_egress_disable(port); 3543 mvpp2_port_disable(port); 3544 3545 port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs), 3546 GFP_KERNEL); 3547 if (!port->txqs) 3548 return -ENOMEM; 3549 3550 /* Associate physical Tx queues to this port and initialize. 3551 * The mapping is predefined. 3552 */ 3553 for (queue = 0; queue < txq_number; queue++) { 3554 int queue_phy_id = mvpp2_txq_phys(port->id, queue); 3555 struct mvpp2_tx_queue *txq; 3556 3557 txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL); 3558 if (!txq) 3559 return -ENOMEM; 3560 3561 txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu), 3562 GFP_KERNEL); 3563 if (!txq->pcpu) 3564 return -ENOMEM; 3565 3566 txq->id = queue_phy_id; 3567 txq->log_id = queue; 3568 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH; 3569 for_each_present_cpu(cpu) { 3570 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 3571 txq_pcpu->cpu = cpu; 3572 } 3573 3574 port->txqs[queue] = txq; 3575 } 3576 3577 port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs), 3578 GFP_KERNEL); 3579 if (!port->rxqs) 3580 return -ENOMEM; 3581 3582 /* Allocate and initialize Rx queue for this port */ 3583 for (queue = 0; queue < rxq_number; queue++) { 3584 struct mvpp2_rx_queue *rxq; 3585 3586 /* Map physical Rx queue to port's logical Rx queue */ 3587 rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL); 3588 if (!rxq) 3589 return -ENOMEM; 3590 /* Map this Rx queue to a physical queue */ 3591 rxq->id = port->first_rxq + queue; 3592 rxq->port = port->id; 3593 rxq->logic_rxq = queue; 3594 3595 port->rxqs[queue] = rxq; 3596 } 3597 3598 /* Configure Rx queue group interrupt for this port */ 3599 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), CONFIG_MV_ETH_RXQ); 3600 3601 /* Create Rx descriptor rings */ 3602 for (queue = 0; queue < rxq_number; queue++) { 3603 struct mvpp2_rx_queue *rxq = port->rxqs[queue]; 3604 3605 rxq->size = port->rx_ring_size; 3606 rxq->pkts_coal = MVPP2_RX_COAL_PKTS; 3607 rxq->time_coal = MVPP2_RX_COAL_USEC; 3608 } 3609 3610 mvpp2_ingress_disable(port); 3611 3612 /* Port default configuration */ 3613 mvpp2_defaults_set(port); 3614 3615 /* Port's classifier configuration */ 3616 mvpp2_cls_oversize_rxq_set(port); 3617 mvpp2_cls_port_config(port); 3618 3619 /* Provide an initial Rx packet size */ 3620 port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN); 3621 3622 /* Initialize pools for swf */ 3623 err = mvpp2_swf_bm_pool_init(port); 3624 if (err) 3625 return err; 3626 3627 return 0; 3628 } 3629 3630 /* Ports initialization */ 3631 static int mvpp2_port_probe(struct udevice *dev, 3632 struct mvpp2_port *port, 3633 int port_node, 3634 struct mvpp2 *priv, 3635 int *next_first_rxq) 3636 { 3637 int phy_node; 3638 u32 id; 3639 u32 phyaddr; 3640 const char *phy_mode_str; 3641 int phy_mode = -1; 3642 int priv_common_regs_num = 2; 3643 int err; 3644 3645 phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy"); 3646 if (phy_node < 0) { 3647 dev_err(&pdev->dev, "missing phy\n"); 3648 return -ENODEV; 3649 } 3650 3651 phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL); 3652 if (phy_mode_str) 3653 phy_mode = phy_get_interface_by_name(phy_mode_str); 3654 if (phy_mode == -1) { 3655 dev_err(&pdev->dev, "incorrect phy mode\n"); 3656 return -EINVAL; 3657 } 3658 3659 id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1); 3660 if (id == -1) { 3661 dev_err(&pdev->dev, "missing port-id value\n"); 3662 return -EINVAL; 3663 } 3664 3665 phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0); 3666 3667 port->priv = priv; 3668 port->id = id; 3669 port->first_rxq = *next_first_rxq; 3670 port->phy_node = phy_node; 3671 port->phy_interface = phy_mode; 3672 port->phyaddr = phyaddr; 3673 3674 port->base = (void __iomem *)dev_get_addr_index(dev->parent, 3675 priv_common_regs_num 3676 + id); 3677 if (IS_ERR(port->base)) 3678 return PTR_ERR(port->base); 3679 3680 port->tx_ring_size = MVPP2_MAX_TXD; 3681 port->rx_ring_size = MVPP2_MAX_RXD; 3682 3683 err = mvpp2_port_init(dev, port); 3684 if (err < 0) { 3685 dev_err(&pdev->dev, "failed to init port %d\n", id); 3686 return err; 3687 } 3688 mvpp2_port_power_up(port); 3689 3690 /* Increment the first Rx queue number to be used by the next port */ 3691 *next_first_rxq += CONFIG_MV_ETH_RXQ; 3692 priv->port_list[id] = port; 3693 return 0; 3694 } 3695 3696 /* Initialize decoding windows */ 3697 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram, 3698 struct mvpp2 *priv) 3699 { 3700 u32 win_enable; 3701 int i; 3702 3703 for (i = 0; i < 6; i++) { 3704 mvpp2_write(priv, MVPP2_WIN_BASE(i), 0); 3705 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0); 3706 3707 if (i < 4) 3708 mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0); 3709 } 3710 3711 win_enable = 0; 3712 3713 for (i = 0; i < dram->num_cs; i++) { 3714 const struct mbus_dram_window *cs = dram->cs + i; 3715 3716 mvpp2_write(priv, MVPP2_WIN_BASE(i), 3717 (cs->base & 0xffff0000) | (cs->mbus_attr << 8) | 3718 dram->mbus_dram_target_id); 3719 3720 mvpp2_write(priv, MVPP2_WIN_SIZE(i), 3721 (cs->size - 1) & 0xffff0000); 3722 3723 win_enable |= (1 << i); 3724 } 3725 3726 mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable); 3727 } 3728 3729 /* Initialize Rx FIFO's */ 3730 static void mvpp2_rx_fifo_init(struct mvpp2 *priv) 3731 { 3732 int port; 3733 3734 for (port = 0; port < MVPP2_MAX_PORTS; port++) { 3735 mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), 3736 MVPP2_RX_FIFO_PORT_DATA_SIZE); 3737 mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), 3738 MVPP2_RX_FIFO_PORT_ATTR_SIZE); 3739 } 3740 3741 mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, 3742 MVPP2_RX_FIFO_PORT_MIN_PKT); 3743 mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); 3744 } 3745 3746 /* Initialize network controller common part HW */ 3747 static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv) 3748 { 3749 const struct mbus_dram_target_info *dram_target_info; 3750 int err, i; 3751 u32 val; 3752 3753 /* Checks for hardware constraints (U-Boot uses only one rxq) */ 3754 if ((rxq_number > MVPP2_MAX_RXQ) || (txq_number > MVPP2_MAX_TXQ)) { 3755 dev_err(&pdev->dev, "invalid queue size parameter\n"); 3756 return -EINVAL; 3757 } 3758 3759 /* MBUS windows configuration */ 3760 dram_target_info = mvebu_mbus_dram_info(); 3761 if (dram_target_info) 3762 mvpp2_conf_mbus_windows(dram_target_info, priv); 3763 3764 /* Disable HW PHY polling */ 3765 val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG); 3766 val |= MVPP2_PHY_AN_STOP_SMI0_MASK; 3767 writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG); 3768 3769 /* Allocate and initialize aggregated TXQs */ 3770 priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(), 3771 sizeof(struct mvpp2_tx_queue), 3772 GFP_KERNEL); 3773 if (!priv->aggr_txqs) 3774 return -ENOMEM; 3775 3776 for_each_present_cpu(i) { 3777 priv->aggr_txqs[i].id = i; 3778 priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; 3779 err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i], 3780 MVPP2_AGGR_TXQ_SIZE, i, priv); 3781 if (err < 0) 3782 return err; 3783 } 3784 3785 /* Rx Fifo Init */ 3786 mvpp2_rx_fifo_init(priv); 3787 3788 /* Reset Rx queue group interrupt configuration */ 3789 for (i = 0; i < MVPP2_MAX_PORTS; i++) 3790 mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), 3791 CONFIG_MV_ETH_RXQ); 3792 3793 writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, 3794 priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); 3795 3796 /* Allow cache snoop when transmiting packets */ 3797 mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1); 3798 3799 /* Buffer Manager initialization */ 3800 err = mvpp2_bm_init(dev, priv); 3801 if (err < 0) 3802 return err; 3803 3804 /* Parser default initialization */ 3805 err = mvpp2_prs_default_init(dev, priv); 3806 if (err < 0) 3807 return err; 3808 3809 /* Classifier default initialization */ 3810 mvpp2_cls_init(priv); 3811 3812 return 0; 3813 } 3814 3815 /* SMI / MDIO functions */ 3816 3817 static int smi_wait_ready(struct mvpp2 *priv) 3818 { 3819 u32 timeout = MVPP2_SMI_TIMEOUT; 3820 u32 smi_reg; 3821 3822 /* wait till the SMI is not busy */ 3823 do { 3824 /* read smi register */ 3825 smi_reg = readl(priv->lms_base + MVPP2_SMI); 3826 if (timeout-- == 0) { 3827 printf("Error: SMI busy timeout\n"); 3828 return -EFAULT; 3829 } 3830 } while (smi_reg & MVPP2_SMI_BUSY); 3831 3832 return 0; 3833 } 3834 3835 /* 3836 * mpp2_mdio_read - miiphy_read callback function. 3837 * 3838 * Returns 16bit phy register value, or 0xffff on error 3839 */ 3840 static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg) 3841 { 3842 struct mvpp2 *priv = bus->priv; 3843 u32 smi_reg; 3844 u32 timeout; 3845 3846 /* check parameters */ 3847 if (addr > MVPP2_PHY_ADDR_MASK) { 3848 printf("Error: Invalid PHY address %d\n", addr); 3849 return -EFAULT; 3850 } 3851 3852 if (reg > MVPP2_PHY_REG_MASK) { 3853 printf("Err: Invalid register offset %d\n", reg); 3854 return -EFAULT; 3855 } 3856 3857 /* wait till the SMI is not busy */ 3858 if (smi_wait_ready(priv) < 0) 3859 return -EFAULT; 3860 3861 /* fill the phy address and regiser offset and read opcode */ 3862 smi_reg = (addr << MVPP2_SMI_DEV_ADDR_OFFS) 3863 | (reg << MVPP2_SMI_REG_ADDR_OFFS) 3864 | MVPP2_SMI_OPCODE_READ; 3865 3866 /* write the smi register */ 3867 writel(smi_reg, priv->lms_base + MVPP2_SMI); 3868 3869 /* wait till read value is ready */ 3870 timeout = MVPP2_SMI_TIMEOUT; 3871 3872 do { 3873 /* read smi register */ 3874 smi_reg = readl(priv->lms_base + MVPP2_SMI); 3875 if (timeout-- == 0) { 3876 printf("Err: SMI read ready timeout\n"); 3877 return -EFAULT; 3878 } 3879 } while (!(smi_reg & MVPP2_SMI_READ_VALID)); 3880 3881 /* Wait for the data to update in the SMI register */ 3882 for (timeout = 0; timeout < MVPP2_SMI_TIMEOUT; timeout++) 3883 ; 3884 3885 return readl(priv->lms_base + MVPP2_SMI) & MVPP2_SMI_DATA_MASK; 3886 } 3887 3888 /* 3889 * mpp2_mdio_write - miiphy_write callback function. 3890 * 3891 * Returns 0 if write succeed, -EINVAL on bad parameters 3892 * -ETIME on timeout 3893 */ 3894 static int mpp2_mdio_write(struct mii_dev *bus, int addr, int devad, int reg, 3895 u16 value) 3896 { 3897 struct mvpp2 *priv = bus->priv; 3898 u32 smi_reg; 3899 3900 /* check parameters */ 3901 if (addr > MVPP2_PHY_ADDR_MASK) { 3902 printf("Error: Invalid PHY address %d\n", addr); 3903 return -EFAULT; 3904 } 3905 3906 if (reg > MVPP2_PHY_REG_MASK) { 3907 printf("Err: Invalid register offset %d\n", reg); 3908 return -EFAULT; 3909 } 3910 3911 /* wait till the SMI is not busy */ 3912 if (smi_wait_ready(priv) < 0) 3913 return -EFAULT; 3914 3915 /* fill the phy addr and reg offset and write opcode and data */ 3916 smi_reg = value << MVPP2_SMI_DATA_OFFS; 3917 smi_reg |= (addr << MVPP2_SMI_DEV_ADDR_OFFS) 3918 | (reg << MVPP2_SMI_REG_ADDR_OFFS); 3919 smi_reg &= ~MVPP2_SMI_OPCODE_READ; 3920 3921 /* write the smi register */ 3922 writel(smi_reg, priv->lms_base + MVPP2_SMI); 3923 3924 return 0; 3925 } 3926 3927 static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp) 3928 { 3929 struct mvpp2_port *port = dev_get_priv(dev); 3930 struct mvpp2_rx_desc *rx_desc; 3931 struct mvpp2_bm_pool *bm_pool; 3932 dma_addr_t dma_addr; 3933 u32 bm, rx_status; 3934 int pool, rx_bytes, err; 3935 int rx_received; 3936 struct mvpp2_rx_queue *rxq; 3937 u32 cause_rx_tx, cause_rx, cause_misc; 3938 u8 *data; 3939 3940 cause_rx_tx = mvpp2_read(port->priv, 3941 MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); 3942 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; 3943 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; 3944 if (!cause_rx_tx && !cause_misc) 3945 return 0; 3946 3947 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; 3948 3949 /* Process RX packets */ 3950 cause_rx |= port->pending_cause_rx; 3951 rxq = mvpp2_get_rx_queue(port, cause_rx); 3952 3953 /* Get number of received packets and clamp the to-do */ 3954 rx_received = mvpp2_rxq_received(port, rxq->id); 3955 3956 /* Return if no packets are received */ 3957 if (!rx_received) 3958 return 0; 3959 3960 rx_desc = mvpp2_rxq_next_desc_get(rxq); 3961 rx_status = mvpp2_rxdesc_status_get(port, rx_desc); 3962 rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc); 3963 rx_bytes -= MVPP2_MH_SIZE; 3964 dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc); 3965 3966 bm = mvpp2_bm_cookie_build(port, rx_desc); 3967 pool = mvpp2_bm_cookie_pool_get(bm); 3968 bm_pool = &port->priv->bm_pools[pool]; 3969 3970 /* In case of an error, release the requested buffer pointer 3971 * to the Buffer Manager. This request process is controlled 3972 * by the hardware, and the information about the buffer is 3973 * comprised by the RX descriptor. 3974 */ 3975 if (rx_status & MVPP2_RXD_ERR_SUMMARY) { 3976 mvpp2_rx_error(port, rx_desc); 3977 /* Return the buffer to the pool */ 3978 mvpp2_pool_refill(port, bm, dma_addr, dma_addr); 3979 return 0; 3980 } 3981 3982 err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr); 3983 if (err) { 3984 netdev_err(port->dev, "failed to refill BM pools\n"); 3985 return 0; 3986 } 3987 3988 /* Update Rx queue management counters */ 3989 mb(); 3990 mvpp2_rxq_status_update(port, rxq->id, 1, 1); 3991 3992 /* give packet to stack - skip on first n bytes */ 3993 data = (u8 *)dma_addr + 2 + 32; 3994 3995 if (rx_bytes <= 0) 3996 return 0; 3997 3998 /* 3999 * No cache invalidation needed here, since the rx_buffer's are 4000 * located in a uncached memory region 4001 */ 4002 *packetp = data; 4003 4004 return rx_bytes; 4005 } 4006 4007 /* Drain Txq */ 4008 static void mvpp2_txq_drain(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, 4009 int enable) 4010 { 4011 u32 val; 4012 4013 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 4014 val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG); 4015 if (enable) 4016 val |= MVPP2_TXQ_DRAIN_EN_MASK; 4017 else 4018 val &= ~MVPP2_TXQ_DRAIN_EN_MASK; 4019 mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val); 4020 } 4021 4022 static int mvpp2_send(struct udevice *dev, void *packet, int length) 4023 { 4024 struct mvpp2_port *port = dev_get_priv(dev); 4025 struct mvpp2_tx_queue *txq, *aggr_txq; 4026 struct mvpp2_tx_desc *tx_desc; 4027 int tx_done; 4028 int timeout; 4029 4030 txq = port->txqs[0]; 4031 aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; 4032 4033 /* Get a descriptor for the first part of the packet */ 4034 tx_desc = mvpp2_txq_next_desc_get(aggr_txq); 4035 mvpp2_txdesc_txq_set(port, tx_desc, txq->id); 4036 mvpp2_txdesc_size_set(port, tx_desc, length); 4037 mvpp2_txdesc_offset_set(port, tx_desc, 4038 (dma_addr_t)packet & MVPP2_TX_DESC_ALIGN); 4039 mvpp2_txdesc_dma_addr_set(port, tx_desc, 4040 (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN); 4041 /* First and Last descriptor */ 4042 mvpp2_txdesc_cmd_set(port, tx_desc, 4043 MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE 4044 | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC); 4045 4046 /* Flush tx data */ 4047 flush_dcache_range((unsigned long)packet, 4048 (unsigned long)packet + ALIGN(length, PKTALIGN)); 4049 4050 /* Enable transmit */ 4051 mb(); 4052 mvpp2_aggr_txq_pend_desc_add(port, 1); 4053 4054 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); 4055 4056 timeout = 0; 4057 do { 4058 if (timeout++ > 10000) { 4059 printf("timeout: packet not sent from aggregated to phys TXQ\n"); 4060 return 0; 4061 } 4062 tx_done = mvpp2_txq_pend_desc_num_get(port, txq); 4063 } while (tx_done); 4064 4065 /* Enable TXQ drain */ 4066 mvpp2_txq_drain(port, txq, 1); 4067 4068 timeout = 0; 4069 do { 4070 if (timeout++ > 10000) { 4071 printf("timeout: packet not sent\n"); 4072 return 0; 4073 } 4074 tx_done = mvpp2_txq_sent_desc_proc(port, txq); 4075 } while (!tx_done); 4076 4077 /* Disable TXQ drain */ 4078 mvpp2_txq_drain(port, txq, 0); 4079 4080 return 0; 4081 } 4082 4083 static int mvpp2_start(struct udevice *dev) 4084 { 4085 struct eth_pdata *pdata = dev_get_platdata(dev); 4086 struct mvpp2_port *port = dev_get_priv(dev); 4087 4088 /* Load current MAC address */ 4089 memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN); 4090 4091 /* Reconfigure parser accept the original MAC address */ 4092 mvpp2_prs_update_mac_da(port, port->dev_addr); 4093 4094 mvpp2_port_power_up(port); 4095 4096 mvpp2_open(dev, port); 4097 4098 return 0; 4099 } 4100 4101 static void mvpp2_stop(struct udevice *dev) 4102 { 4103 struct mvpp2_port *port = dev_get_priv(dev); 4104 4105 mvpp2_stop_dev(port); 4106 mvpp2_cleanup_rxqs(port); 4107 mvpp2_cleanup_txqs(port); 4108 } 4109 4110 static int mvpp2_probe(struct udevice *dev) 4111 { 4112 struct mvpp2_port *port = dev_get_priv(dev); 4113 struct mvpp2 *priv = dev_get_priv(dev->parent); 4114 int err; 4115 4116 /* Initialize network controller */ 4117 err = mvpp2_init(dev, priv); 4118 if (err < 0) { 4119 dev_err(&pdev->dev, "failed to initialize controller\n"); 4120 return err; 4121 } 4122 4123 return mvpp2_port_probe(dev, port, dev_of_offset(dev), priv, 4124 &buffer_loc.first_rxq); 4125 } 4126 4127 static const struct eth_ops mvpp2_ops = { 4128 .start = mvpp2_start, 4129 .send = mvpp2_send, 4130 .recv = mvpp2_recv, 4131 .stop = mvpp2_stop, 4132 }; 4133 4134 static struct driver mvpp2_driver = { 4135 .name = "mvpp2", 4136 .id = UCLASS_ETH, 4137 .probe = mvpp2_probe, 4138 .ops = &mvpp2_ops, 4139 .priv_auto_alloc_size = sizeof(struct mvpp2_port), 4140 .platdata_auto_alloc_size = sizeof(struct eth_pdata), 4141 }; 4142 4143 /* 4144 * Use a MISC device to bind the n instances (child nodes) of the 4145 * network base controller in UCLASS_ETH. 4146 */ 4147 static int mvpp2_base_probe(struct udevice *dev) 4148 { 4149 struct mvpp2 *priv = dev_get_priv(dev); 4150 struct mii_dev *bus; 4151 void *bd_space; 4152 u32 size = 0; 4153 int i; 4154 4155 /* Save hw-version */ 4156 priv->hw_version = dev_get_driver_data(dev); 4157 4158 /* 4159 * U-Boot special buffer handling: 4160 * 4161 * Allocate buffer area for descs and rx_buffers. This is only 4162 * done once for all interfaces. As only one interface can 4163 * be active. Make this area DMA-safe by disabling the D-cache 4164 */ 4165 4166 /* Align buffer area for descs and rx_buffers to 1MiB */ 4167 bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE); 4168 mmu_set_region_dcache_behaviour((unsigned long)bd_space, 4169 BD_SPACE, DCACHE_OFF); 4170 4171 buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space; 4172 size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE; 4173 4174 buffer_loc.tx_descs = 4175 (struct mvpp2_tx_desc *)((unsigned long)bd_space + size); 4176 size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE; 4177 4178 buffer_loc.rx_descs = 4179 (struct mvpp2_rx_desc *)((unsigned long)bd_space + size); 4180 size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE; 4181 4182 for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) { 4183 buffer_loc.bm_pool[i] = 4184 (unsigned long *)((unsigned long)bd_space + size); 4185 size += MVPP2_BM_POOL_SIZE_MAX * sizeof(u32); 4186 } 4187 4188 for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) { 4189 buffer_loc.rx_buffer[i] = 4190 (unsigned long *)((unsigned long)bd_space + size); 4191 size += RX_BUFFER_SIZE; 4192 } 4193 4194 /* Save base addresses for later use */ 4195 priv->base = (void *)dev_get_addr_index(dev, 0); 4196 if (IS_ERR(priv->base)) 4197 return PTR_ERR(priv->base); 4198 4199 priv->lms_base = (void *)dev_get_addr_index(dev, 1); 4200 if (IS_ERR(priv->lms_base)) 4201 return PTR_ERR(priv->lms_base); 4202 4203 /* Finally create and register the MDIO bus driver */ 4204 bus = mdio_alloc(); 4205 if (!bus) { 4206 printf("Failed to allocate MDIO bus\n"); 4207 return -ENOMEM; 4208 } 4209 4210 bus->read = mpp2_mdio_read; 4211 bus->write = mpp2_mdio_write; 4212 snprintf(bus->name, sizeof(bus->name), dev->name); 4213 bus->priv = (void *)priv; 4214 priv->bus = bus; 4215 4216 return mdio_register(bus); 4217 } 4218 4219 static int mvpp2_base_bind(struct udevice *parent) 4220 { 4221 const void *blob = gd->fdt_blob; 4222 int node = dev_of_offset(parent); 4223 struct uclass_driver *drv; 4224 struct udevice *dev; 4225 struct eth_pdata *plat; 4226 char *name; 4227 int subnode; 4228 u32 id; 4229 4230 /* Lookup eth driver */ 4231 drv = lists_uclass_lookup(UCLASS_ETH); 4232 if (!drv) { 4233 puts("Cannot find eth driver\n"); 4234 return -ENOENT; 4235 } 4236 4237 fdt_for_each_subnode(subnode, blob, node) { 4238 /* Skip disabled ports */ 4239 if (!fdtdec_get_is_enabled(blob, subnode)) 4240 continue; 4241 4242 plat = calloc(1, sizeof(*plat)); 4243 if (!plat) 4244 return -ENOMEM; 4245 4246 id = fdtdec_get_int(blob, subnode, "port-id", -1); 4247 4248 name = calloc(1, 16); 4249 sprintf(name, "mvpp2-%d", id); 4250 4251 /* Create child device UCLASS_ETH and bind it */ 4252 device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev); 4253 dev_set_of_offset(dev, subnode); 4254 } 4255 4256 return 0; 4257 } 4258 4259 static const struct udevice_id mvpp2_ids[] = { 4260 { 4261 .compatible = "marvell,armada-375-pp2", 4262 .data = MVPP21, 4263 }, 4264 { } 4265 }; 4266 4267 U_BOOT_DRIVER(mvpp2_base) = { 4268 .name = "mvpp2_base", 4269 .id = UCLASS_MISC, 4270 .of_match = mvpp2_ids, 4271 .bind = mvpp2_base_bind, 4272 .probe = mvpp2_base_probe, 4273 .priv_auto_alloc_size = sizeof(struct mvpp2), 4274 }; 4275