/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * U-Boot version:
 * Copyright (C) 2016-2017 Stefan Roese <sr@denx.de>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <dm/device-internal.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <dm/lists.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/libfdt.h>
#include <linux/mbus.h>
#include <asm-generic/gpio.h>
#include <fdt_support.h>
#include <linux/mdio.h>

DECLARE_GLOBAL_DATA_PTR;

#define __verify_pcpu_ptr(ptr) \
do { \
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \
	(void)__vpp_verify; \
} while (0)

#define VERIFY_PERCPU_PTR(__p) \
({ \
	__verify_pcpu_ptr(__p); \
	(typeof(*(__p)) __kernel __force *)(__p); \
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define smp_processor_id()	0
#define num_present_cpus()	1
#define for_each_present_cpu(cpu) \
	for ((cpu) = 0; (cpu) < 1; (cpu)++)

#define NET_SKB_PAD	max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP			(2 + ETH_HLEN + 4 + 32)
#define MTU			1500
#define RX_BUFFER_SIZE		(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define MVPP2_RXQ_POOL_SHORT_OFFS		20
#define MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define MVPP2_RXQ_POOL_LONG_OFFS		24
#define MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define MVPP2_RXQ_PACKET_OFFSET_OFFS		28
#define MVPP2_RXQ_PACKET_OFFSET_MASK		0x70000000
#define MVPP2_RXQ_DISABLE_MASK			BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define MVPP2_PRS_PORT_LU_MAX			0xf
#define MVPP2_PRS_PORT_LU_MASK(port)		(0xff << ((port) * 4))
#define MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define MVPP2_PRS_INIT_OFF_MASK(port)		(0x3f << (((port) % 4) * 8))
#define MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define MVPP2_PRS_MAX_LOOP_MASK(port)		(0xff << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
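/*
 * The parser TCAM and SRAM are accessed indirectly: software first writes
 * the entry index to MVPP2_PRS_TCAM_IDX_REG / MVPP2_PRS_SRAM_IDX_REG and
 * then transfers the entry one 32-bit word at a time through the DATA
 * registers (see mvpp2_prs_hw_write()/mvpp2_prs_hw_read() below).
 */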
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define MVPP2_PRS_TCAM_INV_MASK			BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define MVPP2_PRS_TCAM_EN_MASK			BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define MVPP2_CLS_PORT_WAY_MASK(port)		(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define MVPP2_CLS_LKP_INDEX_WAY_OFFS		6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS		3
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK		0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define MVPP22_DESC_ADDR_OFFS			8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
#define MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define MVPP2_OCCUPIED_THRESH_OFFSET		0
#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_THRESH_REG			0x2094
#define MVPP2_TRANSMITTED_THRESH_OFFSET		16
#define MVPP2_TRANSMITTED_THRESH_MASK		0x3fff0000
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define MVPP2_TXQ_PENDING_MASK			0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
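/*
 * The aggregated TX queues are per-CPU resources. U-Boot runs on a single
 * CPU (see the smp_processor_id() stub above), so only entry 0 is used.
 */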
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define MVPP22_AGGR_TXQ_DESC_ADDR_OFFS		8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12

#define MVPP22_AXI_CODE_CACHE_OFFS		0
#define MVPP22_AXI_CODE_DOMAIN_OFFS		4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP21_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG		0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK	0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET	7

#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG	0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK	0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK	0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET	8

#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0
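/*
 * The buffer manager (BM) hands out and collects RX buffer pointers in
 * hardware: pools are started/stopped through MVPP2_BM_POOL_CTRL_REG and
 * individual buffers are allocated/released through the PHY_ALLOC/PHY_RLS
 * registers defined below.
 */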
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define MVPP2_BM_POOL_BASE_ADDR_MASK		0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define MVPP2_BM_POOL_SIZE_MASK			0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define MVPP2_BM_POOL_GET_READ_PTR_MASK		0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK			BIT(0)
#define MVPP2_BM_STOP_MASK			BIT(1)
#define MVPP2_BM_STATE_MASK			BIT(4)
#define MVPP2_BM_LOW_THRESH_OFFS		8
#define MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define MVPP2_BM_LOW_THRESH_VALUE(val)		((val) << \
						 MVPP2_BM_LOW_THRESH_OFFS)
#define MVPP2_BM_HIGH_THRESH_OFFS		16
#define MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val)		((val) << \
						 MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK		BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define MVPP2_BM_BPPE_FULL_MASK			BIT(3)
#define MVPP2_BM_AVAILABLE_BP_LOW_MASK		BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK		BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP2_BM_ADDR_HIGH_ALLOC		0x6444
#define MVPP2_BM_ADDR_HIGH_PHYS_MASK		0xff
#define MVPP2_BM_ADDR_HIGH_VIRT_MASK		0xff00
#define MVPP2_BM_ADDR_HIGH_VIRT_SHIFT		8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK		BIT(0)
#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK		BIT(1)
#define MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP21_BM_MC_RLS_REG			0x64c4
#define MVPP2_BM_MC_ID_MASK			0xfff
#define MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
#define MVPP22_BM_MC_RLS_REG			0x64d4
#define MVPP22_BM_POOL_BASE_HIGH_REG		0x6310
#define MVPP22_BM_POOL_BASE_HIGH_MASK		0xff

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXP_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define MVPP2_TXQ_REFILL_PERIOD_MASK(v)		((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define MVPP2_TX_PORT_FLUSH_MASK(port)		(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define MVPP2_GMAC_PORT_EN_MASK			BIT(0)
#define MVPP2_GMAC_PORT_TYPE_MASK		BIT(1)
#define MVPP2_GMAC_MAX_RX_SIZE_OFFS		2
#define MVPP2_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVPP2_GMAC_MIB_CNTR_EN_MASK		BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define MVPP2_GMAC_PERIODIC_XON_EN_MASK		BIT(1)
#define MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define MVPP2_GMAC_PCS_LB_EN_BIT		6
#define MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define MVPP2_GMAC_SA_LOW_OFFS			7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define MVPP2_GMAC_SGMII_MODE_MASK		BIT(0)
#define MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define MVPP2_GMAC_PORT_DIS_PADING_MASK		BIT(5)
#define MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define MVPP2_GMAC_CLK_125_BYPS_EN_MASK		BIT(9)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVPP2_GMAC_EN_PCS_AN			BIT(2)
#define MVPP2_GMAC_AN_BYPASS_EN			BIT(3)
#define MVPP2_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVPP2_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVPP2_GMAC_AN_SPEED_EN			BIT(7)
#define MVPP2_GMAC_FC_ADV_EN			BIT(9)
#define MVPP2_GMAC_EN_FC_AN			BIT(11)
#define MVPP2_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVPP2_GMAC_AN_DUPLEX_EN			BIT(13)
#define MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG	BIT(15)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS		6
#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP2_GMAC_CTRL_4_REG			0x90
#define MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK	BIT(0)
#define MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK	BIT(5)
#define MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK	BIT(6)
#define MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK	BIT(7)

/*
 * Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */
/* Port Mac Control0 */
#define MVPP22_XLG_CTRL0_REG			0x100
#define MVPP22_XLG_PORT_EN			BIT(0)
#define MVPP22_XLG_MAC_RESETN			BIT(1)
#define MVPP22_XLG_RX_FC_EN			BIT(7)
#define MVPP22_XLG_MIBCNT_DIS			BIT(13)
/* Port Mac Control1 */
#define MVPP22_XLG_CTRL1_REG			0x104
#define MVPP22_XLG_MAX_RX_SIZE_OFFS		0
#define MVPP22_XLG_MAX_RX_SIZE_MASK		0x1fff
/* Port Interrupt Mask */
#define MVPP22_XLG_INTERRUPT_MASK_REG		0x118
#define MVPP22_XLG_INTERRUPT_LINK_CHANGE	BIT(1)
/* Port Mac Control3 */
#define MVPP22_XLG_CTRL3_REG			0x11c
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC	(1 << 13)
/* Port Mac Control4 */
#define MVPP22_XLG_CTRL4_REG			0x184
#define MVPP22_XLG_FORWARD_802_3X_FC_EN		BIT(5)
#define MVPP22_XLG_FORWARD_PFC_EN		BIT(6)
#define MVPP22_XLG_MODE_DMA_1G			BIT(12)
#define MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK	BIT(14)

/* XPCS registers */

/* Global Configuration 0 */
#define MVPP22_XPCS_GLOBAL_CFG_0_REG		0x0
#define MVPP22_XPCS_PCSRESET			BIT(0)
#define MVPP22_XPCS_PCSMODE_OFFS		3
#define MVPP22_XPCS_PCSMODE_MASK		(0x3 << \
						 MVPP22_XPCS_PCSMODE_OFFS)
#define MVPP22_XPCS_LANEACTIVE_OFFS		5
#define MVPP22_XPCS_LANEACTIVE_MASK		(0x3 << \
						 MVPP22_XPCS_LANEACTIVE_OFFS)

/* MPCS registers */

#define PCS40G_COMMON_CONTROL			0x14
#define FORWARD_ERROR_CORRECTION_MASK		BIT(10)

#define PCS_CLOCK_RESET				0x14c
#define TX_SD_CLK_RESET_MASK			BIT(0)
#define RX_SD_CLK_RESET_MASK			BIT(1)
#define MAC_CLK_RESET_MASK			BIT(2)
#define CLK_DIVISION_RATIO_OFFS			4
#define CLK_DIVISION_RATIO_MASK			(0x7 << CLK_DIVISION_RATIO_OFFS)
#define CLK_DIV_PHASE_SET_MASK			BIT(11)

/* System Soft Reset 1 */
#define GOP_SOFT_RESET_1_REG			0x108
#define NETC_GOP_SOFT_RESET_OFFS		6
#define NETC_GOP_SOFT_RESET_MASK		(0x1 << \
						 NETC_GOP_SOFT_RESET_OFFS)

/* Ports Control 0 */
#define NETCOMP_PORTS_CONTROL_0_REG		0x110
#define NETC_BUS_WIDTH_SELECT_OFFS		1
#define NETC_BUS_WIDTH_SELECT_MASK		(0x1 << \
						 NETC_BUS_WIDTH_SELECT_OFFS)
#define NETC_GIG_RX_DATA_SAMPLE_OFFS		29
#define NETC_GIG_RX_DATA_SAMPLE_MASK		(0x1 << \
						 NETC_GIG_RX_DATA_SAMPLE_OFFS)
#define NETC_CLK_DIV_PHASE_OFFS			31
#define NETC_CLK_DIV_PHASE_MASK			(0x1 << NETC_CLK_DIV_PHASE_OFFS)
/* Ports Control 1 */
#define NETCOMP_PORTS_CONTROL_1_REG		0x114
#define NETC_PORTS_ACTIVE_OFFSET(p)		(0 + (p))
#define NETC_PORTS_ACTIVE_MASK(p)		(0x1 << \
						 NETC_PORTS_ACTIVE_OFFSET(p))
#define NETC_PORT_GIG_RF_RESET_OFFS(p)		(28 + (p))
#define NETC_PORT_GIG_RF_RESET_MASK(p)		(0x1 << \
						 NETC_PORT_GIG_RF_RESET_OFFS(p))
#define NETCOMP_CONTROL_0_REG			0x120
#define NETC_GBE_PORT0_SGMII_MODE_OFFS		0
#define NETC_GBE_PORT0_SGMII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT0_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_SGMII_MODE_OFFS		1
#define NETC_GBE_PORT1_SGMII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT1_SGMII_MODE_OFFS)
#define NETC_GBE_PORT1_MII_MODE_OFFS		2
#define NETC_GBE_PORT1_MII_MODE_MASK		(0x1 << \
						 NETC_GBE_PORT1_MII_MODE_OFFS)

#define MVPP22_SMI_MISC_CFG_REG			(MVPP22_SMI + 0x04)
#define MVPP22_SMI_POLLING_EN			BIT(10)

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
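/*
 * The RX/TX descriptor rings are plain arrays; the next_desc_to_proc
 * indices in struct mvpp2_rx_queue/mvpp2_tx_queue are advanced with this
 * wrap-around helper.
 */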
/* PP2.2: SMI: 0x12a200 -> offset 0x1200 to iface_base */
#define MVPP22_SMI				0x1200

/* Additional PPv2.2 offsets */
#define MVPP22_MPCS				0x007000
#define MVPP22_XPCS				0x007400
#define MVPP22_PORT_BASE			0x007e00
#define MVPP22_PORT_OFFSET			0x001000
#define MVPP22_RFU1				0x318000

/* Maximum number of ports */
#define MVPP22_GOP_MAC_NUM			4

/* GMAC TX FIFO minimum thresholds, per interface mode */
#define MVPP2_RGMII_TX_FIFO_MIN_TH		0x41
#define MVPP2_SGMII_TX_FIFO_MIN_TH		0x5
#define MVPP2_SGMII2_5_TX_FIFO_MIN_TH		0xb

/* Net Complex */
enum mv_netc_topology {
	MV_NETC_GE_MAC2_SGMII = BIT(0),
	MV_NETC_GE_MAC2_RGMII = BIT(1),
	MV_NETC_GE_MAC3_SGMII = BIT(2),
	MV_NETC_GE_MAC3_RGMII = BIT(3),
};

enum mv_netc_phase {
	MV_NETC_FIRST_PHASE,
	MV_NETC_SECOND_PHASE,
};

enum mv_netc_sgmii_xmi_mode {
	MV_NETC_GBE_SGMII,
	MV_NETC_GBE_XMII,
};

enum mv_netc_mii_mode {
	MV_NETC_GBE_RGMII,
	MV_NETC_GBE_MII,
};

enum mv_netc_lanes {
	MV_NETC_LANE_23,
	MV_NETC_LANE_45,
};

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH		15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS		1000000UL
#define MVPP2_RX_COAL_PKTS			32
#define MVPP2_RX_COAL_USEC			100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Since those two bytes sit in front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4-byte
 * boundary: the hardware skips them on its own.
 */
#define MVPP2_MH_SIZE				2
#define MVPP2_ETH_TYPE_LEN			2
#define MVPP2_PPPOE_HDR_SIZE			8
#define MVPP2_VLAN_TAG_LEN			4

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE			0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE		32
#define MVPP2_TX_CSUM_MAX_SIZE			9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC		1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC		1000

#define MVPP2_TX_MTU_MAX			0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT				16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS				4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ				8

/* Default number of TXQs in use */
#define MVPP2_DEFAULT_TXQ			1

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ			1
#define CFG_MV_ETH_RXQ				8	/* increment by 8 */

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD				16

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD				16

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK			16

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE			16

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE			32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN			(MVPP2_DESC_ALIGNED_SIZE - 1)

/* RX FIFO constants */
#define MVPP21_RX_FIFO_PORT_DATA_SIZE		0x2000
#define MVPP21_RX_FIFO_PORT_ATTR_SIZE		0x80
#define MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE	0x8000
#define MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE	0x2000
#define MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE	0x1000
#define MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE	0x200
#define MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE	0x80
#define MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE	0x40
#define MVPP2_RX_FIFO_PORT_MIN_PKT		0x80
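/*
 * On PPv2.2 the RX FIFO is sized per port according to the port's maximum
 * link speed (10G/2.5G/1G), hence the three data/attribute size variants
 * above.
 */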
/* TX general registers */
#define MVPP22_TX_FIFO_SIZE_REG(eth_tx_port)	(0x8860 + ((eth_tx_port) << 2))
#define MVPP22_TX_FIFO_SIZE_MASK		0xf

/* TX FIFO constants */
#define MVPP2_TX_FIFO_DATA_SIZE_10KB		0xa
#define MVPP2_TX_FIFO_DATA_SIZE_3KB		0x3

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	0

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)

#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16

/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH = 1,
	MVPP2_TAG_TYPE_DSA = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS			8
#define MVPP2_PRS_PORT_MASK			0xff
#define MVPP2_PRS_LU_MASK			0xf
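/*
 * Each 32-bit TCAM data word stores two header-data bytes in its low half
 * and their enable (mask) bytes in its high half. The two macros below map
 * a header byte offset to that layout: the data byte lands at
 * (offs / 2) * 4 + (offs % 2) and its enable byte sits two positions above
 * it (e.g. offs 0 -> bytes 0/2, offs 5 -> bytes 9/11).
 */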
#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \
	(((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \
	(((offs) * 2) - ((offs) % 2) + 2)
#define MVPP2_PRS_TCAM_AI_BYTE			16
#define MVPP2_PRS_TCAM_PORT_BYTE		17
#define MVPP2_PRS_TCAM_LU_BYTE			20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD			5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* Sram structure
 * The fields are represented by MVPP2_PRS_SRAM_DATA_REG(3)->(0).
 */
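/*
 * SRAM fields are packed at arbitrary bit offsets within the 4-word entry,
 * so the offsets below are given in bits; mvpp2_prs_sram_bits_set() and
 * mvpp2_prs_sram_bits_clear() address them byte by byte.
 */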
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)
/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64

/* BM constants */
#define MVPP2_BM_POOLS_NUM		1
#define MVPP2_BM_LONG_BUF_NUM		16
#define MVPP2_BM_SHORT_BUF_NUM		16
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	0

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24

/* BM short pool packet size
 * These values ensure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;
	void __iomem *iface_base;

	void __iomem *mpcs_base;
	void __iomem *xpcs_base;
	void __iomem *rfu1_base;

	u32 netc_config;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	int probe_done;
	u8 num_ports;
};

struct mvpp2_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;

	int pkt_size;

	u32 pending_cause_rx;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	int phyaddr;
	struct udevice *mdio_dev;
	struct mii_dev *bus;
#if CONFIG_IS_ENABLED(DM_GPIO)
	struct gpio_desc phy_reset_gpio;
	struct gpio_desc phy_tx_disable_gpio;
#endif
	int init;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	u8 dev_addr[ETH_ALEN];
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and receive DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC)	*/
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};
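/*
 * All descriptor accesses go through the mvpp2_txdesc_*()/mvpp2_rxdesc_*()
 * helpers further down, which pick the PPv2.1 or PPv2.2 member of the
 * union based on priv->hw_version.
 */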
/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	unsigned long *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_DEFAULT_TXQ;

static int base_id;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/*
 * U-Boot internal data, mostly uncached buffers for descriptors and data
 */
struct buffer_location {
	struct mvpp2_tx_desc *aggr_tx_descs;
	struct mvpp2_tx_desc *tx_descs;
	struct mvpp2_rx_desc *rx_descs;
	unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
	unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
	int first_rxq;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;
static int buffer_loc_init;

/*
 * Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). The driver uses fewer buffer descriptors, so
 * 1MB of BD space is sufficient.
 */
#define BD_SPACE	(1 << 20)

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}
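/*
 * On PPv2.2 the buffer DMA address and cookie share 64-bit descriptor
 * fields with other information, so the accessors below mask them to
 * bits [40:0] (GENMASK_ULL(40, 0)).
 */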
static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}
static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}

/* Read tcam entry from hw */
static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);

	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
		return MVPP2_PRS_TCAM_ENTRY_INVALID;

	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));

	return 0;
}

/* Invalidate tcam hw entry */
static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
{
	/* Write index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
		    MVPP2_PRS_TCAM_INV_MASK);
}

/* Enable shadow table entry and set its lookup ID */
static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
{
	priv->prs_shadow[index].valid = true;
	priv->prs_shadow[index].lu = lu;
}

/* Update ri fields in shadow table entry */
static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
				    unsigned int ri, unsigned int ri_mask)
{
	priv->prs_shadow[index].ri_mask = ri_mask;
	priv->prs_shadow[index].ri = ri;
}

/* Update lookup field in tcam sw entry */
static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
}
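/*
 * The TCAM port enable bits are active-low: a cleared bit in the enable
 * byte means the entry matches that port. Hence "add" clears the bit in
 * mvpp2_prs_tcam_port_set() and the port-map helpers invert the map.
 */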
/* Update mask for single port in tcam sw entry */
static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
				    unsigned int port, bool add)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	if (add)
		pe->tcam.byte[enable_off] &= ~(1 << port);
	else
		pe->tcam.byte[enable_off] |= 1 << port;
}

/* Update port map in tcam sw entry */
static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
					unsigned int ports)
{
	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
	pe->tcam.byte[enable_off] &= ~port_mask;
	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
}

/* Obtain port map from tcam sw entry */
static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
{
	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);

	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
}

/* Set byte of data and its enable bits in tcam sw entry */
static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char byte,
					 unsigned char enable)
{
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
}

/* Get byte of data and its enable bits from tcam sw entry */
static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
					 unsigned int offs, unsigned char *byte,
					 unsigned char *enable)
{
	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
}

/* Set ethertype in tcam sw entry */
static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
				  unsigned short ethertype)
{
	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}

/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
				    int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
}

/* Clear bits in sram sw entry */
static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
				      int val)
{
	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
}

/* Update ri bits in sram sw entry */
static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;

	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;

		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
	}
}

/* Update ai bits in sram sw entry */
static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
				     unsigned int bits, unsigned int mask)
{
	unsigned int i;
	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;

	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
		if (!(mask & BIT(i)))
			continue;

		if (bits & BIT(i))
			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
		else
			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);

		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
	}
}

/* Read ai bits from sram sw entry */
static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
{
	u8 bits;
	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
	int ai_en_off = ai_off + 1;
	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;

	bits = (pe->sram.byte[ai_off] >> ai_shift) |
	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));

	return bits;
}

/* In sram sw entry set lookup ID field of the tcam key to be used in the next
 * lookup iteration
 */
static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
				       unsigned int lu)
{
	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;

	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
}

/* In the sram sw entry set sign and value of the next lookup offset
 * and the offset value generated to the classifier
 */
static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
				     unsigned int op)
{
	/* Set sign */
	if (shift < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
		shift = 0 - shift;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
	}

	/* Set value */
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
		(unsigned char)shift;

	/* Reset and set operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* In the sram sw entry set sign and value of the user defined offset
 * generated to the classifier
 */
static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
				      unsigned int type, int offset,
				      unsigned int op)
{
	/* Set sign */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = 0 - offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* Set value */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
				  MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] &=
	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
					MVPP2_PRS_SRAM_UDF_BITS)] |=
				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));

	/* Set offset type */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
	/* Set offset operation */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));

	/* Set base offset as current */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}

/* Find parser flow entry */
static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);

	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
		u8 bits;

		if (!priv->prs_shadow[tid].valid ||
		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		bits = mvpp2_prs_sram_ai_get(pe);

		/* Sram stores the classification lookup ID in AI bits [5:0] */
		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Return first free tcam index, seeking from start to end */
static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
				     unsigned char end)
{
	int tid;

	if (start > end)
		swap(start, end);

	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;

	for (tid = start; tid <= end; tid++) {
		if (!priv->prs_shadow[tid].valid)
			return tid;
	}

	return -EINVAL;
}

/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;

		/* Non-promiscuous mode for all ports - DROP unknown packets */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
					 MVPP2_PRS_RI_DROP_MASK);

		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}
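/*
 * As above, each entry is mirrored in priv->prs_shadow so software can
 * test for a valid entry without first reading it back from hardware.
 */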
/* Set port to promiscuous mode */
static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
{
	struct mvpp2_prs_entry pe;

	/* Promiscuous mode - Accept unknown packets */

	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
		/* Entry exists - update port only */
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_MAC_PROMISCUOUS;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Accept multicast */
static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int index, int port,
				    bool add)
{
	struct mvpp2_prs_entry pe;
	unsigned char da_mc;

	/* Ethernet multicast address first byte is
	 * 0x01 for IPv4 and 0x33 for IPv6
	 */
	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;

	if (priv->prs_shadow[index].valid) {
		/* Entry exists - update port only */
		pe.index = index;
		mvpp2_prs_hw_read(priv, &pe);
	} else {
		/* Entry doesn't exist - create new */
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = index;

		/* Continue - set next lookup */
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

		/* Set result info bits */
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
					 MVPP2_PRS_RI_L2_CAST_MASK);

		/* Update tcam entry data first byte */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);

		/* Shift to ethertype */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Update shadow table */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(&pe, port, add);

	mvpp2_prs_hw_write(priv, &pe);
}

/* Parser per-port initialization */
static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
				   int lu_max, int offset)
{
	u32 val;

	/* Set lookup ID */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);

	/* Set maximum number of loops for packet received from port */
	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);

	/* Set initial offset for packet header extraction for the first
	 * searching loop
	 */
	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
}
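/* The per-port parser init registers pack several ports into one
 * 32-bit register, which is why mvpp2_prs_hw_port_init() goes through
 * the MVPP2_PRS_*_MASK(port)/_VAL(port, ...) accessors and performs a
 * read-modify-write for each field.
 */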
/* Default flow entries initialization for all ports */
static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(&pe, 0);

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table and hw entry */
		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(priv, &pe);
	}
}

/* Set default entry for Marvell Header field */
static void mvpp2_prs_mh_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(priv, &pe);
}

/* Set default entries (placeholders) for promiscuous, non-promiscuous and
 * multicast MAC addresses
 */
static void mvpp2_prs_mac_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));

	/* Non-promiscuous mode for all ports - DROP unknown packets */
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
				 MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, &pe);

	/* placeholders only - no ports */
	mvpp2_prs_mac_drop_all_set(priv, 0, false);
	mvpp2_prs_mac_promisc_set(priv, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
}

/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);

	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
				 MVPP2_PRS_RI_PPPOE_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
				MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(priv, &pe);
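	/* Each ethertype entry below follows the same recipe: reserve the
	 * first free TCAM index, match the ethertype in the L2 lookup,
	 * program the SRAM result info / next lookup bits, and mirror the
	 * entry in the shadow table.
	 */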
	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_ARP);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: LBTD */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				 MVPP2_PRS_RI_CPU_CODE_MASK |
				 MVPP2_PRS_RI_UDF3_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
				MVPP2_PRS_RI_CPU_CODE_MASK |
				MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_IP);
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
				     MVPP2_PRS_IPV4_HEAD_MASK |
				     MVPP2_PRS_IPV4_IHL_MASK);

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip eth_type + 4 bytes of IP header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv4 with options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;

	/* Clear tcam data before updating */
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;

	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
				     MVPP2_PRS_IPV4_HEAD,
				     MVPP2_PRS_IPV4_HEAD_MASK);

	/* Clear ri before updating */
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
				 MVPP2_PRS_RI_L3_PROTO_MASK);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
					MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;

	mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);

	/* Skip DIP of IPV6 header */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
				 MVPP2_MAX_L3_ADDR_SIZE,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = false;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;

	/* Unmask all ports */
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);

	/* Generate flow in the next iteration */
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
				 MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Set L3 offset even though the L3 type is unknown */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
				  MVPP2_ETH_TYPE_LEN,
				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);

	/* Update shadow table and hw entry */
	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	priv->prs_shadow[pe.index].finish = true;
	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
				MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(priv, &pe);

	return 0;
}
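/* A minimal sketch of the expected initialization order at probe time,
 * assuming an already mapped driver-private "priv" (illustration only):
 *
 *	err = mvpp2_prs_default_init(dev, priv);
 *	if (!err)
 *		mvpp2_cls_init(priv);
 */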
/* Parser default initialization */
static int mvpp2_prs_default_init(struct udevice *dev,
				  struct mvpp2 *priv)
{
	int err, index, i;

	/* Enable tcam table */
	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);

	/* Clear all tcam and sram entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);

		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
	}

	/* Invalidate all tcam entries */
	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
		mvpp2_prs_hw_inv(priv, index);

	priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
					sizeof(struct mvpp2_prs_shadow),
					GFP_KERNEL);
	if (!priv->prs_shadow)
		return -ENOMEM;

	/* Always start from lookup = 0 */
	for (index = 0; index < MVPP2_MAX_PORTS; index++)
		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
				       MVPP2_PRS_PORT_LU_MAX, 0);

	mvpp2_prs_def_flow_init(priv);

	mvpp2_prs_mh_init(priv);

	mvpp2_prs_mac_init(priv);

	err = mvpp2_prs_etype_init(priv);
	if (err)
		return err;

	return 0;
}

/* Compare MAC DA with tcam entry data */
static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
				       const u8 *da, unsigned char *mask)
{
	unsigned char tcam_byte, tcam_mask;
	int index;

	for (index = 0; index < ETH_ALEN; index++) {
		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
		if (tcam_mask != mask[index])
			return false;

		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
			return false;
	}

	return true;
}

/* Find tcam entry with matched pair <MAC DA, port> */
static struct mvpp2_prs_entry *
mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
			    unsigned char *mask, int udf_type)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
	if (!pe)
		return NULL;
	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);

	/* Go through all entries with MVPP2_PRS_LU_MAC */
	for (tid = MVPP2_PE_FIRST_FREE_TID;
	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		unsigned int entry_pmap;

		if (!priv->prs_shadow[tid].valid ||
		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (priv->prs_shadow[tid].udf != udf_type))
			continue;

		pe->index = tid;
		mvpp2_prs_hw_read(priv, pe);
		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);

		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
		    entry_pmap == pmap)
			return pe;
	}
	kfree(pe);

	return NULL;
}

/* Update parser's mac da entry */
static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
				   const u8 *da, bool add)
{
	struct mvpp2_prs_entry *pe;
	unsigned int pmap, len, ri;
	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
					 MVPP2_PRS_UDF_MAC_DEF);

	/* No such entry */
	if (!pe) {
		if (!add)
			return 0;

		/* Create new TCAM entry */
		/* Find first range mac entry */
		for (tid = MVPP2_PE_FIRST_FREE_TID;
		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
			if (priv->prs_shadow[tid].valid &&
			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
			    (priv->prs_shadow[tid].udf ==
			     MVPP2_PRS_UDF_MAC_RANGE))
				break;

		/* Go through all entries from first to last */
		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
						tid - 1);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
		pe->index = tid;

		/* Mask all ports */
		mvpp2_prs_tcam_port_map_set(pe, 0);
	}

	/* Update port mask */
	mvpp2_prs_tcam_port_set(pe, port, add);

	/* Invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(pe);
	if (pmap == 0) {
		if (add) {
			kfree(pe);
			return -EINVAL;
		}
		mvpp2_prs_hw_inv(priv, pe->index);
		priv->prs_shadow[pe->index].valid = false;
		kfree(pe);
		return 0;
	}

	/* Continue - set next lookup */
	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);

	/* Set match on DA */
	len = ETH_ALEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);

	/* Set result info bits */
	ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				 MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
				MVPP2_PRS_RI_MAC_ME_MASK);

	/* Shift to ethertype */
	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);

	/* Update shadow table and hw entry */
	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(priv, pe);

	kfree(pe);

	return 0;
}

static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
{
	int err;

	/* Remove old parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
				      false);
	if (err)
		return err;

	/* Add new parser entry */
	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
	if (err)
		return err;

	/* Set addr in the device */
	memcpy(port->dev_addr, da, ETH_ALEN);

	return 0;
}

/* Set prs flow for the port */
static int mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry *pe;
	int tid;

	pe = mvpp2_prs_flow_find(port->priv, port->id);

	/* No such entry exists */
	if (!pe) {
		/* Go through all entries from last to first */
		tid = mvpp2_prs_tcam_first_free(port->priv,
						MVPP2_PE_LAST_FREE_TID,
						MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
		if (!pe)
			return -ENOMEM;

		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
		pe->index = tid;

		/* Set flow ID */
		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);

		/* Update shadow table */
		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
	}

	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
	mvpp2_prs_hw_write(port->priv, pe);
	kfree(pe);

	return 0;
}

/* Classifier configuration routines */
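/* The classifier is driven by two small tables: the flow table (three
 * 32-bit data words per entry, addressed via MVPP2_CLS_FLOW_INDEX_REG)
 * and the lookup ID decode table, addressed by a {lkpid, way} pair.
 */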
/* Update classification flow table registers */
static void mvpp2_cls_flow_write(struct mvpp2 *priv,
				 struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}

/* Update classification lookup table register */
static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
				   struct mvpp2_cls_lookup_entry *le)
{
	u32 val;

	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
}

/* Classifier default initialization */
static void mvpp2_cls_init(struct mvpp2 *priv)
{
	struct mvpp2_cls_lookup_entry le;
	struct mvpp2_cls_flow_entry fe;
	int index;

	/* Enable classifier */
	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);

	/* Clear classifier flow table */
	memset(&fe.data, 0, sizeof(fe.data));
	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
		fe.index = index;
		mvpp2_cls_flow_write(priv, &fe);
	}

	/* Clear classifier lookup table */
	le.data = 0;
	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
		le.lkpid = index;
		le.way = 0;
		mvpp2_cls_lookup_write(priv, &le);

		le.way = 1;
		mvpp2_cls_lookup_write(priv, &le);
	}
}
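/* Each port claims one lookup ID decode entry (way 0, lkpid equal to
 * the port id) and uses it only to steer packets to the port's first
 * RX queue; the classification engines themselves stay disabled.
 */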
static void mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	u32 val;

	/* Set way for the port */
	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);

	/* Pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->id;
	le.way = 0;
	le.data = 0;

	/* Set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= port->first_rxq;

	/* Disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* Update lookup ID table entry */
	mvpp2_cls_lookup_write(port->priv, &le);
}

/* Set CPU queue number for oversize packets */
static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);

	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));

	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct udevice *dev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
	bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	if (priv->hw_version == MVPP22)
		mvpp2_write(priv, MVPP22_BM_POOL_BASE_HIGH_REG,
			    (upper_32_bits(bm_pool->dma_addr) &
			     MVPP22_BM_POOL_BASE_HIGH_MASK));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->type = MVPP2_BM_FREE;
	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool)
{
	int i;

	for (i = 0; i < bm_pool->buf_num; i++) {
		/* Get buffer back from the buffer manager */
		mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	}

	bm_pool->buf_num = 0;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct udevice *dev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	u32 val;

	mvpp2_bm_bufs_free(dev, priv, bm_pool);
	if (bm_pool->buf_num) {
		dev_err(dev, "cannot free all buffers in pool %d\n",
			bm_pool->id);
		return 0;
	}
	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	return 0;
}

static int mvpp2_bm_pools_init(struct udevice *dev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, RX_BUFFER_SIZE);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
				      sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Set pool number in a BM cookie */
static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
{
	u32 bm;

	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);

	return bm;
}

/* Get pool number from a BM cookie */
static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
{
	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
}
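/* A BM cookie carries the pool number in an 8-bit field at
 * MVPP2_BM_COOKIE_POOL_OFFS. Hedged illustration of the two helpers
 * above (values for demonstration only):
 *
 *	u32 bm = mvpp2_bm_cookie_pool_set(0, 3);
 *	int pool = mvpp2_bm_cookie_pool_get(bm);	(pool is 3 again)
 */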
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     unsigned long buf_phys_addr)
{
	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX descriptor.
	 * Instead of storing the virtual address, we store the
	 * physical address
	 */
	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
}

/* Refill BM pool */
static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
			      dma_addr_t dma_addr,
			      phys_addr_t phys_addr)
{
	int pool = mvpp2_bm_cookie_pool_get(bm);

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		dev_err(port->phy_dev->dev,
			"cannot allocate %d buffers for pool %d\n", buf_num,
			bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		mvpp2_bm_pool_put(port, bm_pool->id,
				  (dma_addr_t)buffer_loc.rx_buffer[i],
				  (unsigned long)buffer_loc.rx_buffer[i]);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	return i;
}
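/* Pools are sized lazily on first use: mvpp2_bm_pool_use() below fills
 * a long pool with MVPP2_BM_LONG_BUF_NUM buffers and a short pool with
 * MVPP2_BM_SHORT_BUF_NUM, re-allocating when the packet size grows.
 */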
/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
		  int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
		dev_err(port->phy_dev->dev, "mixing pool types is forbidden\n");
		return NULL;
	}

	if (new_pool->type == MVPP2_BM_FREE)
		new_pool->type = type;

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
	    (new_pool->pkt_size == 0)) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = type == MVPP2_BM_SWF_LONG ?
				   MVPP2_BM_LONG_BUF_NUM :
				   MVPP2_BM_SHORT_BUF_NUM;
		else
			mvpp2_bm_bufs_free(NULL, port->priv, new_pool);

		new_pool->pkt_size = pkt_size;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			dev_err(port->phy_dev->dev,
				"pool %d: %d of %d allocated\n", new_pool->id,
				num, pkts_num);
			return NULL;
		}
	}

	return new_pool;
}

/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port,
					  MVPP2_BM_SWF_LONG_POOL(port->id),
					  MVPP2_BM_SWF_LONG,
					  port->pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= (1 << port->id);

		for (rxq = 0; rxq < rxq_number; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	return 0;
}

/* Port configuration routines */

static void mvpp2_port_mii_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		val |= MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		val &= ~MVPP2_GMAC_INBAND_AN_MASK;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		val |= MVPP2_GMAC_PORT_RGMII_MASK;
		/* fall through */
	default:
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	}

	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}

static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	val |= MVPP2_GMAC_FC_ADV_EN;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val |= MVPP2_GMAC_PORT_EN_MASK;
	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (port->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	      ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* PPv2.2 GoP/GMAC config */

/* Set the MAC to reset or exit from reset */
static int gop_gmac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (reset)
		val |= MVPP2_GMAC_PORT_RESET_MASK;
	else
		val &= ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

/*
 * gop_gpcs_mode_cfg
 *
 * Configure the port to work with the Gig PCS or not.
 */
static int gop_gpcs_mode_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_PCS_ENABLE_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
	/* enable / disable PCS on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

static int gop_bypass_clk_cfg(struct mvpp2_port *port, int en)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (en)
		val |= MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	else
		val &= ~MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
	/* enable / disable the 125MHz clock bypass on this port */
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

static void gop_gmac_sgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/* configure GIG MAC to SGMII mode */
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN */
	val = MVPP2_GMAC_EN_PCS_AN |
	      MVPP2_GMAC_AN_BYPASS_EN |
	      MVPP2_GMAC_AN_SPEED_EN |
	      MVPP2_GMAC_EN_FC_AN |
	      MVPP2_GMAC_AN_DUPLEX_EN |
	      MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void gop_gmac_2500basex_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII2_5_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val |= MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/*
	 * Configure GIG MAC to 2500Base-X mode connected to a fiber
	 * transceiver
	 */
	val |= MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* In 2500BaseX mode, we can't negotiate speed
	 * and we do not want InBand autoneg
	 * bypass enabled (link interrupt storm risk
	 * otherwise).
	 */
	val = MVPP2_GMAC_AN_BYPASS_EN |
	      MVPP2_GMAC_EN_PCS_AN |
	      MVPP2_GMAC_CONFIG_GMII_SPEED |
	      MVPP2_GMAC_CONFIG_FULL_DUPLEX |
	      MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void gop_gmac_1000basex_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	/* configure QSGMII bypass according to mode */
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/* configure GIG MAC to 1000BASEX mode */
	val |= MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* In 1000BaseX mode, we can't negotiate speed (it's
	 * only 1000), and we do not want InBand autoneg
	 * bypass enabled (link interrupt storm risk
	 * otherwise).
	 */
	val = MVPP2_GMAC_AN_BYPASS_EN |
	      MVPP2_GMAC_EN_PCS_AN |
	      MVPP2_GMAC_CONFIG_GMII_SPEED |
	      MVPP2_GMAC_CONFIG_FULL_DUPLEX |
	      MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

static void gop_gmac_rgmii_cfg(struct mvpp2_port *port)
{
	u32 val, thresh;

	/*
	 * Configure minimal level of the Tx FIFO before the lower part
	 * starts to read a packet
	 */
	thresh = MVPP2_RGMII_TX_FIFO_MIN_TH;
	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);

	/* Disable bypass of sync module */
	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
	/* configure DP clock select according to mode */
	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
	val |= MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	/* configure GIG MAC to SGMII mode */
	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* configure AN 0xb8e8 */
	val = MVPP2_GMAC_AN_BYPASS_EN |
	      MVPP2_GMAC_AN_SPEED_EN |
	      MVPP2_GMAC_EN_FC_AN |
	      MVPP2_GMAC_AN_DUPLEX_EN |
	      MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}

/* Set the internal muxes to the required MAC in the GOP */
static int gop_gmac_mode_cfg(struct mvpp2_port *port)
{
	u32 val;

	/* Set TX FIFO thresholds */
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		gop_gmac_sgmii_cfg(port);
		break;
	case PHY_INTERFACE_MODE_1000BASEX:
		gop_gmac_1000basex_cfg(port);
		break;

	case PHY_INTERFACE_MODE_2500BASEX:
		gop_gmac_2500basex_cfg(port);
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		gop_gmac_rgmii_cfg(port);
		break;

	default:
		return -1;
	}

	/* Jumbo frame support - 0x1400 * 2 = 0x2800 bytes */
	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= 0x1400 << MVPP2_GMAC_MAX_RX_SIZE_OFFS;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* PeriodicXonEn disable */
	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	val &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);

	return 0;
}

static void gop_xlg_2_gig_mac_cfg(struct mvpp2_port *port)
{
	u32 val;

	/* relevant only for MAC0 (XLG0 and GMAC0) */
	if (port->gop_id > 0)
		return;

	/* configure 1Gig MAC mode */
	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
	val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
	writel(val, port->base + MVPP22_XLG_CTRL3_REG);
}

static int gop_gpcs_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (reset)
		val &= ~MVPP2_GMAC_SGMII_MODE_MASK;
	else
		val |= MVPP2_GMAC_SGMII_MODE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}
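/* gop_mpcs_mode() below configures the 10G path: it clears the FEC
 * bits in the PCS40G common control register, then reprograms the PCS
 * clock reset register - the clock division ratio first, followed by
 * the MAC/RX-SD/TX-SD clock resets - for this port's MPCS instance.
 */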
static int gop_mpcs_mode(struct mvpp2_port *port)
{
	u32 val;

	/* configure PCS40G COMMON CONTROL */
	val = readl(port->priv->mpcs_base + port->gop_id * MVPP22_PORT_OFFSET +
		    PCS40G_COMMON_CONTROL);
	val &= ~FORWARD_ERROR_CORRECTION_MASK;
	writel(val, port->priv->mpcs_base + port->gop_id * MVPP22_PORT_OFFSET +
	       PCS40G_COMMON_CONTROL);

	/* configure PCS CLOCK RESET */
	val = readl(port->priv->mpcs_base + port->gop_id * MVPP22_PORT_OFFSET +
		    PCS_CLOCK_RESET);
	val &= ~CLK_DIVISION_RATIO_MASK;
	val |= 1 << CLK_DIVISION_RATIO_OFFS;
	writel(val, port->priv->mpcs_base + port->gop_id * MVPP22_PORT_OFFSET +
	       PCS_CLOCK_RESET);

	val &= ~CLK_DIV_PHASE_SET_MASK;
	val |= MAC_CLK_RESET_MASK;
	val |= RX_SD_CLK_RESET_MASK;
	val |= TX_SD_CLK_RESET_MASK;
	writel(val, port->priv->mpcs_base + port->gop_id * MVPP22_PORT_OFFSET +
	       PCS_CLOCK_RESET);

	return 0;
}

/* Set the internal muxes to the required MAC in the GOP */
static int gop_xlg_mac_mode_cfg(struct mvpp2_port *port, int num_of_act_lanes)
{
	u32 val;

	/* configure 10G MAC mode */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	val |= MVPP22_XLG_RX_FC_EN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
	val |= MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC;
	writel(val, port->base + MVPP22_XLG_CTRL3_REG);

	/* read - modify - write */
	val = readl(port->base + MVPP22_XLG_CTRL4_REG);
	val &= ~MVPP22_XLG_MODE_DMA_1G;
	val |= MVPP22_XLG_FORWARD_PFC_EN;
	val |= MVPP22_XLG_FORWARD_802_3X_FC_EN;
	val &= ~MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK;
	writel(val, port->base + MVPP22_XLG_CTRL4_REG);

	/* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */
	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_MAX_RX_SIZE_MASK;
	val |= 0x1400 << MVPP22_XLG_MAX_RX_SIZE_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);

	/* unmask link change interrupt */
	val = readl(port->base + MVPP22_XLG_INTERRUPT_MASK_REG);
	val |= MVPP22_XLG_INTERRUPT_LINK_CHANGE;
	val |= 1; /* unmask summary bit */
	writel(val, port->base + MVPP22_XLG_INTERRUPT_MASK_REG);

	return 0;
}

/* Set the MAC to reset or exit from reset */
static int gop_xlg_mac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (reset)
		val &= ~MVPP22_XLG_MAC_RESETN;
	else
		val |= MVPP22_XLG_MAC_RESETN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	return 0;
}
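/* Typical unreset sequence used by gop_port_init() below for the gig
 * path (sketch of the existing flow, not an additional code path):
 *
 *	gop_gpcs_mode_cfg(port, 1);	configure PCS for SGMII/BaseX
 *	gop_gmac_mode_cfg(port);	MAC mode + TX FIFO thresholds
 *	gop_gpcs_reset(port, 0);	take the PCS out of reset
 *	gop_gmac_reset(port, 0);	take the MAC out of reset
 */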
/*
 * gop_port_init
 *
 * Init physical port. Configures the port mode and all its elements
 * accordingly.
 * Does not verify that the selected mode/port number is valid at the
 * core level.
 */
static int gop_port_init(struct mvpp2_port *port)
{
	int mac_num = port->gop_id;
	int num_of_act_lanes;

	if (mac_num >= MVPP22_GOP_MAC_NUM) {
		log_err("illegal port number %d\n", mac_num);
		return -1;
	}

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		gop_gmac_reset(port, 1);

		/* configure PCS */
		gop_gpcs_mode_cfg(port, 0);
		gop_bypass_clk_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* pcs unreset */
		gop_gpcs_reset(port, 0);

		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		/* configure PCS */
		gop_gpcs_mode_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* select proper Mac mode */
		gop_xlg_2_gig_mac_cfg(port);

		/* pcs unreset */
		gop_gpcs_reset(port, 0);
		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_10GBASER:
	case PHY_INTERFACE_MODE_5GBASER:
	case PHY_INTERFACE_MODE_XAUI:
		num_of_act_lanes = 2;
		mac_num = 0;
		/* configure PCS */
		gop_mpcs_mode(port);
		/* configure MAC */
		gop_xlg_mac_mode_cfg(port, num_of_act_lanes);

		/* mac unreset */
		gop_xlg_mac_reset(port, 0);
		break;

	default:
		log_err("Requested port mode (%d) not supported\n",
			port->phy_interface);
		return -1;
	}

	return 0;
}

static void gop_xlg_mac_port_enable(struct mvpp2_port *port, int enable)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (enable) {
		/* Enable port and MIB counters update */
		val |= MVPP22_XLG_PORT_EN;
		val &= ~MVPP22_XLG_MIBCNT_DIS;
	} else {
		/* Disable port */
		val &= ~MVPP22_XLG_PORT_EN;
	}
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);
}

static void gop_port_enable(struct mvpp2_port *port, int enable)
{
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		if (enable)
			mvpp2_port_enable(port);
		else
			mvpp2_port_disable(port);
		break;

	case PHY_INTERFACE_MODE_10GBASER:
	case PHY_INTERFACE_MODE_5GBASER:
	case PHY_INTERFACE_MODE_XAUI:
		gop_xlg_mac_port_enable(port, enable);

		break;
	default:
		log_err("%s: Wrong port mode (%d)\n", __func__,
			port->phy_interface);
		return;
	}
}

/* RFU1 functions */
static inline u32 gop_rfu1_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->rfu1_base + offset);
}

static inline void gop_rfu1_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->rfu1_base + offset);
}

static u32 mvpp2_netc_cfg_create(int gop_id, phy_interface_t phy_type)
{
	u32 val = 0;

	if (gop_id == 2) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII ||
		    phy_type == PHY_INTERFACE_MODE_1000BASEX ||
		    phy_type == PHY_INTERFACE_MODE_2500BASEX)
			val |= MV_NETC_GE_MAC2_SGMII;
		else if (phy_type == PHY_INTERFACE_MODE_RGMII ||
			 phy_type == PHY_INTERFACE_MODE_RGMII_ID)
			val |= MV_NETC_GE_MAC2_RGMII;
	}

	if (gop_id == 3) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII ||
		    phy_type == PHY_INTERFACE_MODE_1000BASEX ||
		    phy_type == PHY_INTERFACE_MODE_2500BASEX)
			val |= MV_NETC_GE_MAC3_SGMII;
		else if (phy_type == PHY_INTERFACE_MODE_RGMII ||
			 phy_type == PHY_INTERFACE_MODE_RGMII_ID)
			val |= MV_NETC_GE_MAC3_RGMII;
	}

	return val;
}

static void gop_netc_active_port(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORTS_ACTIVE_MASK(gop_id));

	val <<= NETC_PORTS_ACTIVE_OFFSET(gop_id);
	val &= NETC_PORTS_ACTIVE_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}

static void gop_netc_mii_mode(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
	reg &= ~NETC_GBE_PORT1_MII_MODE_MASK;

	val <<= NETC_GBE_PORT1_MII_MODE_OFFS;
	val &= NETC_GBE_PORT1_MII_MODE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}

static void gop_netc_gop_reset(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, GOP_SOFT_RESET_1_REG);
	reg &= ~NETC_GOP_SOFT_RESET_MASK;

	val <<= NETC_GOP_SOFT_RESET_OFFS;
	val &= NETC_GOP_SOFT_RESET_MASK;

	reg |= val;

	gop_rfu1_write(priv, GOP_SOFT_RESET_1_REG, reg);
}

static void gop_netc_gop_clock_logic_set(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_CLK_DIV_PHASE_MASK;

	val <<= NETC_CLK_DIV_PHASE_OFFS;
	val &= NETC_CLK_DIV_PHASE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_port_rf_reset(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(gop_id));

	val <<= NETC_PORT_GIG_RF_RESET_OFFS(gop_id);
	val &= NETC_PORT_GIG_RF_RESET_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}

static void gop_netc_gbe_sgmii_mode_select(struct mvpp2 *priv, int gop_id,
					   u32 val)
{
	u32 reg, mask, offset;

	if (gop_id == 2) {
		mask = NETC_GBE_PORT0_SGMII_MODE_MASK;
		offset = NETC_GBE_PORT0_SGMII_MODE_OFFS;
	} else {
		mask = NETC_GBE_PORT1_SGMII_MODE_MASK;
		offset = NETC_GBE_PORT1_SGMII_MODE_OFFS;
	}
	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
	reg &= ~mask;

	val <<= offset;
	val &= mask;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}

static void gop_netc_bus_width_select(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_BUS_WIDTH_SELECT_MASK;

	val <<= NETC_BUS_WIDTH_SELECT_OFFS;
	val &= NETC_BUS_WIDTH_SELECT_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}
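/* All gop_netc_*() setters follow the same RFU1 read-modify-write
 * pattern: read the register, clear the field using its mask, shift
 * the new value into position, mask it and write the register back.
 */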
static void gop_netc_sample_stages_timing(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK;

	val <<= NETC_GIG_RX_DATA_SAMPLE_OFFS;
	val &= NETC_GIG_RX_DATA_SAMPLE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_mac_to_xgmii(struct mvpp2 *priv, int gop_id,
				  enum mv_netc_phase phase)
{
	switch (phase) {
	case MV_NETC_FIRST_PHASE:
		/* Set Bus Width to HB mode = 1 */
		gop_netc_bus_width_select(priv, 1);
		/* Select RGMII mode */
		gop_netc_gbe_sgmii_mode_select(priv, gop_id, MV_NETC_GBE_XMII);
		break;

	case MV_NETC_SECOND_PHASE:
		/* De-assert the relevant port HB reset */
		gop_netc_port_rf_reset(priv, gop_id, 1);
		break;
	}
}

static void gop_netc_mac_to_sgmii(struct mvpp2 *priv, int gop_id,
				  enum mv_netc_phase phase)
{
	switch (phase) {
	case MV_NETC_FIRST_PHASE:
		/* Set Bus Width to HB mode = 1 */
		gop_netc_bus_width_select(priv, 1);
		/* Select SGMII mode */
		if (gop_id >= 1) {
			gop_netc_gbe_sgmii_mode_select(priv, gop_id,
						       MV_NETC_GBE_SGMII);
		}

		/* Configure the sample stages */
		gop_netc_sample_stages_timing(priv, 0);
		/* Configure the ComPhy Selector */
		/* gop_netc_com_phy_selector_config(netComplex); */
		break;

	case MV_NETC_SECOND_PHASE:
		/* De-assert the relevant port HB reset */
		gop_netc_port_rf_reset(priv, gop_id, 1);
		break;
	}
}

static int gop_netc_init(struct mvpp2 *priv, enum mv_netc_phase phase)
{
	u32 c = priv->netc_config;

	if (c & MV_NETC_GE_MAC2_SGMII)
		gop_netc_mac_to_sgmii(priv, 2, phase);
	else if (c & MV_NETC_GE_MAC2_RGMII)
		gop_netc_mac_to_xgmii(priv, 2, phase);

	if (c & MV_NETC_GE_MAC3_SGMII) {
		gop_netc_mac_to_sgmii(priv, 3, phase);
	} else {
		gop_netc_mac_to_xgmii(priv, 3, phase);
		if (c & MV_NETC_GE_MAC3_RGMII)
			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_RGMII);
		else
			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_MII);
	}

	/* Activate gop ports 0, 2, 3 */
	gop_netc_active_port(priv, 0, 1);
	gop_netc_active_port(priv, 2, 1);
	gop_netc_active_port(priv, 3, 1);

	if (phase == MV_NETC_SECOND_PHASE) {
		/* Enable the GOP internal clock logic */
		gop_netc_gop_clock_logic_set(priv, 1);
		/* De-assert GOP unit reset */
		gop_netc_gop_reset(priv, 1);
	}

	return 0;
}
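/* Netcomplex bring-up is deliberately two-phased: MV_NETC_FIRST_PHASE
 * selects the bus width and per-port modes, while MV_NETC_SECOND_PHASE
 * releases the per-port HB resets, enables the GOP clock logic and
 * de-asserts the GOP unit soft reset.
 */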
/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Configure port to loopback if needed */
		if (port->flags & MVPP2_F_LOOPBACK)
			mvpp2_port_loopback_set(port);

		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}
/* Enable transmit via physical egress queue
 * - HW starts to take descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			dev_warn(port->phy_dev->dev,
				 "Tx stop timed out, status=0x%08x\n",
				 reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}
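/*
 * Typical usage of the status update below: mvpp2_rxq_init() publishes
 * (0, rxq->size) to hand every descriptor to the hardware up front,
 * and mvpp2_recv() publishes (1, 1) after each processed packet to
 * return its descriptor.
 */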
/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	/* Offset is in units of 32 bytes */
	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
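/*
 * The BM cookie packs the pool number and the CPU number (always 0 in
 * this single-CPU U-Boot context) into one u32 via the
 * MVPP2_BM_COOKIE_POOL_OFFS and MVPP2_BM_COOKIE_CPU_OFFS fields;
 * mvpp2_recv() later unpacks the pool with mvpp2_bm_cookie_pool_get().
 */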
/* Obtain BM cookie information from descriptor */
static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
				 struct mvpp2_rx_desc *rx_desc)
{
	int cpu = smp_processor_id();
	int pool;

	pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
		MVPP2_RXD_BM_POOL_ID_MASK) >>
		MVPP2_RXD_BM_POOL_ID_OFFS;

	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
}

/* Tx descriptors helper methods */

/* Get number of Tx descriptors waiting to be transmitted by HW */
static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
				       struct mvpp2_tx_queue *txq)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);

	return val & MVPP2_TXQ_PENDING_MASK;
}

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
	}
}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}

/* Free Tx queue buffers */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++)
		mvpp2_txq_inc_get(txq_pcpu);
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}

/* Rx/Tx queue initialization/cleanup methods */
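/*
 * Aggregated TXQs are per-CPU staging rings: mvpp2_send() places a
 * descriptor (tagged with the physical TXQ number) on the aggregated
 * ring, and the hardware then moves it to the physical TXQ for
 * transmission.
 */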
/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct udevice *dev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = buffer_loc.aggr_tx_descs;
	aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
	if (!aggr_txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(aggr_txq->descs !=
	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address - indirect access */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			  MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

	return 0;
}

/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	u32 rxq_dma;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = buffer_loc.rx_descs;
	rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
	if (!rxq->descs)
		return -ENOMEM;

	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}

/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 bm = mvpp2_bm_cookie_build(port, rx_desc);

		mvpp2_pool_refill(port, bm,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_drop_pkts(port, rxq);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
}
/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = buffer_loc.tx_descs;
	txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
	if (!txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
		    MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT.
	 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS.
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
		    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
	}

	return 0;
}

/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
}
/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The queue has been stopped, so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			dev_warn(port->phy_dev->dev,
				 "port %d: cleaning queue %d timed out\n",
				 port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}

/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	mvpp2_txq_sent_counter_clear(port);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}

/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < rxq_number; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	mvpp2_txq_sent_counter_clear(port);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}
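/*
 * mvpp2_link_event() mirrors the phylib state into the GMAC: the
 * forced speed/duplex bits are rewritten to match the PHY, and
 * ingress/egress are enabled or disabled on link transitions.
 */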
/* Adjust link */
static void mvpp2_link_event(struct mvpp2_port *port)
{
	struct phy_device *phydev = port->phy_dev;
	int status_change = 0;
	u32 val;

	if (phydev->link) {
		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			u32 val;

			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
				 MVPP2_GMAC_CONFIG_GMII_SPEED |
				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
				 MVPP2_GMAC_AN_SPEED_EN |
				 MVPP2_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000 ||
			    phydev->speed == 2500)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

			port->duplex = phydev->duplex;
			port->speed = phydev->speed;
		}
	}

	if (phydev->link != port->link) {
		if (!phydev->link) {
			port->duplex = -1;
			port->speed = 0;
		}

		port->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
				MVPP2_GMAC_FORCE_LINK_DOWN);
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
		} else {
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);
		}
	}
}

/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		dev_err(port->phy_dev->dev,
			"bad rx status %08x (crc error), size=%zu\n", status,
			sz);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		dev_err(port->phy_dev->dev,
			"bad rx status %08x (overrun error), size=%zu\n",
			status, sz);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		dev_err(port->phy_dev->dev,
			"bad rx status %08x (resource error), size=%zu\n",
			status, sz);
		break;
	}
}

/* Return the consumed buffer to the BM pool (no skb handling in U-Boot) */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool,
			   u32 bm, dma_addr_t dma_addr)
{
	mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
	return 0;
}

/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp2_gmac_max_rx_size_set(port);
	default:
		break;
	}

	mvpp2_txp_max_tx_size_set(port);

	if (port->priv->hw_version == MVPP21)
		mvpp2_port_enable(port);
	else
		gop_port_enable(port, 1);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mvpp2_egress_disable(port);

	if (port->priv->hw_version == MVPP21)
		mvpp2_port_disable(port);
	else
		gop_port_enable(port, 0);
}
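/*
 * Connect to the PHY on first use only; on later starts (port->init
 * set and the link previously up) just re-enable ingress and egress.
 */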
static void mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	if (!port->init || port->link == 0) {
		phy_dev = dm_mdio_phy_connect(port->mdio_dev, port->phyaddr,
					      dev, port->phy_interface);

		/*
		 * If the PHY doesn't match any existing U-Boot drivers, the
		 * PHY framework connects it to the generic one, which has
		 * uid == 0xffffffff. In this case act as if the PHY weren't
		 * declared in the DT. Otherwise, in case of the 3310 (for
		 * which no driver exists), the link would not be correctly
		 * detected. Removing the PHY entry from the DT in case of
		 * the 3310 is not an option because it is required for the
		 * phy_fw_down procedure.
		 */
		if (phy_dev &&
		    phy_dev->drv->uid == 0xffffffff) { /* Generic phy */
			dev_warn(dev,
				 "Marking phy as invalid, link will not be checked\n");
			/* set phy_addr to invalid value */
			port->phyaddr = PHY_MAX_ADDR;
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);

			return;
		}

		port->phy_dev = phy_dev;
		if (!phy_dev) {
			dev_err(dev, "cannot connect to phy\n");
			return;
		}
		phy_dev->supported &= PHY_GBIT_FEATURES;
		phy_dev->advertising = phy_dev->supported;

		port->link = 0;
		port->duplex = 0;
		port->speed = 0;

		phy_config(phy_dev);
		phy_startup(phy_dev);
		if (!phy_dev->link)
			printf("%s: No link\n", phy_dev->dev->name);
		else
			port->init = 1;
	} else {
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}
}

static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
{
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		dev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      port->dev_addr, true);
	if (err) {
		dev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		dev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		dev_err(dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		dev_err(dev, "cannot allocate Tx queues\n");
		return err;
	}

	if (port->phyaddr < PHY_MAX_ADDR) {
		mvpp2_phy_connect(dev, port);
		mvpp2_link_event(port);
	} else {
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	mvpp2_start_dev(port);

	return 0;
}

/* No Device ops here in U-Boot */

/* Driver initialization */

static void mvpp2_port_power_up(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;

	/* On PPv2.2 the GoP / interface configuration has already been done */
	if (priv->hw_version == MVPP21)
		mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);
	if (priv->hw_version == MVPP21)
		mvpp2_port_fc_adv_enable(port);
	mvpp2_port_reset(port);
}
/* Initialize port HW */
static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	if (port->first_rxq + rxq_number >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	if (priv->hw_version == MVPP21)
		mvpp2_port_disable(port);
	else
		gop_port_enable(port, 0);

	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < txq_number; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq)
			return -ENOMEM;

		txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
					 GFP_KERNEL);
		if (!txq->pcpu)
			return -ENOMEM;

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs)
		return -ENOMEM;

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq)
			return -ENOMEM;
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		return err;

	return 0;
}
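/*
 * Parse the port DT node: the "phy" phandle (or "fixed-link" subnode),
 * the PHY mode, the "port-id" value and, optionally, the PHY reset and
 * SFP tx-disable GPIOs.
 */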
static int phy_info_parse(struct udevice *dev, struct mvpp2_port *port)
{
	int port_node = dev_of_offset(dev);
	int phy_node;
	u32 id;
	int phyaddr = 0;
	int fixed_link = 0;
	int ret;

	phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");
	fixed_link = fdt_subnode_offset(gd->fdt_blob, port_node, "fixed-link");

	if (phy_node > 0) {
		int parent;

		if (fixed_link != -FDT_ERR_NOTFOUND) {
			/* phy_addr is set to invalid value for fixed links */
			phyaddr = PHY_MAX_ADDR;
		} else {
			phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node,
						 "reg", 0);
			if (phyaddr < 0) {
				dev_err(dev, "could not find phy address\n");
				return -1;
			}
		}
		parent = fdt_parent_offset(gd->fdt_blob, phy_node);
		ret = uclass_get_device_by_of_offset(UCLASS_MDIO, parent,
						     &port->mdio_dev);
		if (ret)
			return ret;
	} else {
		/* phy_addr is set to invalid value */
		phyaddr = PHY_MAX_ADDR;
	}

	port->phy_interface = dev_read_phy_mode(dev);
	if (port->phy_interface == PHY_INTERFACE_MODE_NA) {
		dev_err(dev, "incorrect phy mode\n");
		return -EINVAL;
	}

	id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1);
	if (id == -1) {
		dev_err(dev, "missing port-id value\n");
		return -EINVAL;
	}

#if CONFIG_IS_ENABLED(DM_GPIO)
	gpio_request_by_name(dev, "phy-reset-gpios", 0,
			     &port->phy_reset_gpio, GPIOD_IS_OUT);
	gpio_request_by_name(dev, "marvell,sfp-tx-disable-gpio", 0,
			     &port->phy_tx_disable_gpio, GPIOD_IS_OUT);
#endif

	port->id = id;
	if (port->priv->hw_version == MVPP21)
		port->first_rxq = port->id * rxq_number;
	else
		port->first_rxq = port->id * port->priv->max_port_rxqs;
	port->phyaddr = phyaddr;

	return 0;
}

#if CONFIG_IS_ENABLED(DM_GPIO)
/* Port GPIO initialization */
static void mvpp2_gpio_init(struct mvpp2_port *port)
{
	if (dm_gpio_is_valid(&port->phy_reset_gpio)) {
		dm_gpio_set_value(&port->phy_reset_gpio, 1);
		mdelay(10);
		dm_gpio_set_value(&port->phy_reset_gpio, 0);
	}

	if (dm_gpio_is_valid(&port->phy_tx_disable_gpio))
		dm_gpio_set_value(&port->phy_tx_disable_gpio, 0);
}
#endif

/* Ports initialization */
static int mvpp2_port_probe(struct udevice *dev,
			    struct mvpp2_port *port,
			    int port_node,
			    struct mvpp2 *priv)
{
	int err;

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;

	err = mvpp2_port_init(dev, port);
	if (err < 0) {
		dev_err(dev, "failed to init port %d\n", port->id);
		return err;
	}
	mvpp2_port_power_up(port);

#if CONFIG_IS_ENABLED(DM_GPIO)
	mvpp2_gpio_init(port);
#endif

	priv->port_list[port->id] = port;
	priv->num_ports++;
	return 0;
}

/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}

/* Initialize Rx FIFOs */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		if (priv->hw_version == MVPP22) {
			if (port == 0) {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE);
			} else if (port == 1) {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE);
			} else {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE);
			}
		} else {
			mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
				    MVPP21_RX_FIFO_PORT_DATA_SIZE);
			mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
				    MVPP21_RX_FIFO_PORT_ATTR_SIZE);
		}
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
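/*
 * As with the RX FIFO above, the TX FIFO is split unevenly between the
 * ports on PPv2.2: port 0 gets the 10KB data size, all other ports 3KB.
 */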
/* Initialize Tx FIFOs */
static void mvpp2_tx_fifo_init(struct mvpp2 *priv)
{
	int port, val;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		/* Port 0 supports 10KB TX FIFO */
		if (port == 0) {
			val = MVPP2_TX_FIFO_DATA_SIZE_10KB &
			      MVPP22_TX_FIFO_SIZE_MASK;
		} else {
			val = MVPP2_TX_FIFO_DATA_SIZE_3KB &
			      MVPP22_TX_FIFO_SIZE_MASK;
		}
		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), val);
	}
}

static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
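/*
 * mvpp2_init() below performs the once-per-controller setup: address
 * decoding (MBUS windows on PPv2.1, AXI attributes on PPv2.2), PHY
 * polling mode, aggregated TXQs, RX/TX FIFOs, buffer manager, parser
 * and classifier defaults.
 */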
/* Initialize network controller common part HW */
static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Checks for hardware constraints (U-Boot uses only one rxq) */
	if ((rxq_number > priv->max_port_rxqs) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(dev, "invalid queue size parameter\n");
		return -EINVAL;
	}

	if (priv->hw_version == MVPP22) {
		mvpp2_axi_init(priv);
	} else {
		/* MBUS windows configuration */
		dram_target_info = mvebu_mbus_dram_info();
		if (dram_target_info)
			mvpp2_conf_mbus_windows(dram_target_info, priv);
	}

	if (priv->hw_version == MVPP21) {
		/* Disable HW PHY polling */
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		/* Enable HW PHY polling */
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val |= MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	/* Tx Fifo Init */
	if (priv->hw_version == MVPP22)
		mvpp2_tx_fifo_init(priv);

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(dev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(dev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
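/*
 * U-Boot polls a single RX queue: fetch one descriptor, check the
 * error summary, return the buffer to the buffer manager and hand the
 * (uncached) packet data to the network stack.
 */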
static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_rx_desc *rx_desc;
	struct mvpp2_bm_pool *bm_pool;
	dma_addr_t dma_addr;
	u32 bm, rx_status;
	int pool, rx_bytes, err;
	int rx_received;
	struct mvpp2_rx_queue *rxq;
	u8 *data;

	if (port->phyaddr < PHY_MAX_ADDR)
		if (!port->phy_dev->link)
			return 0;

	/* Process RX packets */
	rxq = port->rxqs[0];

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);

	/* Return if no packets are received */
	if (!rx_received)
		return 0;

	rx_desc = mvpp2_rxq_next_desc_get(rxq);
	rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
	rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
	rx_bytes -= MVPP2_MH_SIZE;
	dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);

	bm = mvpp2_bm_cookie_build(port, rx_desc);
	pool = mvpp2_bm_cookie_pool_get(bm);
	bm_pool = &port->priv->bm_pools[pool];

	/* In case of an error, release the requested buffer pointer
	 * to the Buffer Manager. This request process is controlled
	 * by the hardware, and the information about the buffer is
	 * comprised by the RX descriptor.
	 */
	if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
		mvpp2_rx_error(port, rx_desc);
		/* Return the buffer to the pool */
		mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
		return 0;
	}

	err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
	if (err) {
		dev_err(dev, "failed to refill BM pools\n");
		return 0;
	}

	/* Update Rx queue management counters */
	mb();
	mvpp2_rxq_status_update(port, rxq->id, 1, 1);

	/* Give the packet to the stack - skip the first n bytes */
	data = (u8 *)dma_addr + 2 + 32;

	if (rx_bytes <= 0)
		return 0;

	/*
	 * No cache invalidation needed here, since the rx_buffers are
	 * located in an uncached memory region
	 */
	*packetp = data;

	return rx_bytes;
}

static int mvpp2_send(struct udevice *dev, void *packet, int length)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_tx_desc *tx_desc;
	int tx_done;
	int timeout;

	if (port->phyaddr < PHY_MAX_ADDR)
		if (!port->phy_dev->link)
			return 0;

	txq = port->txqs[0];
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, length);
	mvpp2_txdesc_offset_set(port, tx_desc,
				(dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);
	/* First and Last descriptor */
	mvpp2_txdesc_cmd_set(port, tx_desc,
			     MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
			     | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);

	/* Flush tx data */
	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + ALIGN(length, PKTALIGN));

	/* Enable transmit */
	mb();
	mvpp2_aggr_txq_pend_desc_add(port, 1);

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);

	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent from aggregated to phys TXQ\n");
			return 0;
		}
		tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (tx_done);

	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent\n");
			return 0;
		}
		tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	} while (!tx_done);

	return 0;
}

static int mvpp2_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mvpp2_port *port = dev_get_priv(dev);

	/* Load current MAC address */
	memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);

	/* Reconfigure parser to accept the original MAC address */
	mvpp2_prs_update_mac_da(port, port->dev_addr);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp2_port_power_up(port);
	default:
		break;
	}

	mvpp2_open(dev, port);

	return 0;
}

static void mvpp2_stop(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);

	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);
}
static int mvpp2_write_hwaddr(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);

	return mvpp2_prs_update_mac_da(port, port->dev_addr);
}

static int mvpp2_base_probe(struct udevice *dev)
{
	struct mvpp2 *priv = dev_get_priv(dev);
	void *bd_space;
	u32 size = 0;
	int i;

	/* Save hw-version */
	priv->hw_version = dev_get_driver_data(dev);

	/*
	 * U-Boot special buffer handling:
	 *
	 * Allocate buffer area for descs and rx_buffers. This is only
	 * done once for all interfaces, as only one interface can be
	 * active at a time. Make this area DMA-safe by disabling the
	 * D-cache.
	 */
	if (!buffer_loc_init) {
		/* Align buffer area for descs and rx_buffers to 1MiB */
		bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
		mmu_set_region_dcache_behaviour((unsigned long)bd_space,
						BD_SPACE, DCACHE_OFF);

		buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
		size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;

		buffer_loc.tx_descs =
			(struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
		size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;

		buffer_loc.rx_descs =
			(struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
		size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;

		for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
			buffer_loc.bm_pool[i] =
				(unsigned long *)((unsigned long)bd_space + size);
			if (priv->hw_version == MVPP21)
				size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32);
			else
				size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64);
		}

		for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
			buffer_loc.rx_buffer[i] =
				(unsigned long *)((unsigned long)bd_space + size);
			size += RX_BUFFER_SIZE;
		}

		/* Clear the complete area so that all descriptors are cleared */
		memset(bd_space, 0, size);

		buffer_loc_init = 1;
	}

	/* Save base addresses for later use */
	priv->base = devfdt_get_addr_index_ptr(dev, 0);
	if (!priv->base)
		return -EINVAL;

	if (priv->hw_version == MVPP21) {
		priv->lms_base = devfdt_get_addr_index_ptr(dev, 1);
		if (!priv->lms_base)
			return -EINVAL;
	} else {
		priv->iface_base = devfdt_get_addr_index_ptr(dev, 1);
		if (!priv->iface_base)
			return -EINVAL;

		/* Store common base addresses for all ports */
		priv->mpcs_base = priv->iface_base + MVPP22_MPCS;
		priv->xpcs_base = priv->iface_base + MVPP22_XPCS;
		priv->rfu1_base = priv->iface_base + MVPP22_RFU1;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	return 0;
}

static int mvpp2_probe(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = dev_get_priv(dev->parent);
	int err;

	/* Only call the probe function for the parent once */
	if (!priv->probe_done) {
		err = mvpp2_base_probe(dev->parent);
		if (err)
			return err;
	}

	port->priv = priv;

	err = phy_info_parse(dev, port);
	if (err)
		return err;

	/*
	 * We need the port specific io base addresses at this stage, since
	 * gop_port_init() accesses these registers
	 */
	if (priv->hw_version == MVPP21) {
		int priv_common_regs_num = 2;

		port->base = devfdt_get_addr_index_ptr(
			dev->parent, priv_common_regs_num + port->id);
		if (!port->base)
			return -EINVAL;
	} else {
		port->gop_id = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
					      "gop-port-id", -1);
		if (port->gop_id == -1) {
			dev_err(dev, "missing gop-port-id value\n");
			return -EINVAL;
		}

		port->base = priv->iface_base + MVPP22_PORT_BASE +
			port->gop_id * MVPP22_PORT_OFFSET;

		/* GoP Init */
		gop_port_init(port);
	}

	if (!priv->probe_done) {
		/* Initialize network controller */
		err = mvpp2_init(dev, priv);
		if (err < 0) {
			dev_err(dev, "failed to initialize controller\n");
			return err;
		}
		priv->num_ports = 0;
		priv->probe_done = 1;
	}

	err = mvpp2_port_probe(dev, port, dev_of_offset(dev), priv);
	if (err)
		return err;

	if (priv->hw_version == MVPP22) {
		priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id,
							   port->phy_interface);

		/* Netcomplex configurations for all ports */
		gop_netc_init(priv, MV_NETC_FIRST_PHASE);
		gop_netc_init(priv, MV_NETC_SECOND_PHASE);
	}

	return 0;
}

/*
 * Empty BM pool and stop its activity before the OS is started
 */
static int mvpp2_remove(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = port->priv;
	int i;

	priv->num_ports--;

	if (priv->num_ports)
		return 0;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);

	return 0;
}

static const struct eth_ops mvpp2_ops = {
	.start = mvpp2_start,
	.send = mvpp2_send,
	.recv = mvpp2_recv,
	.stop = mvpp2_stop,
	.write_hwaddr = mvpp2_write_hwaddr
};

static struct driver mvpp2_driver = {
	.name = "mvpp2",
	.id = UCLASS_ETH,
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.ops = &mvpp2_ops,
	.priv_auto = sizeof(struct mvpp2_port),
	.plat_auto = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ACTIVE_DMA,
};
/*
 * Use a MISC device to bind the n instances (child nodes) of the
 * network base controller in UCLASS_ETH.
 */
static int mvpp2_base_bind(struct udevice *parent)
{
	const void *blob = gd->fdt_blob;
	int node = dev_of_offset(parent);
	struct uclass_driver *drv;
	struct udevice *dev;
	struct eth_pdata *plat;
	char *name;
	int subnode;
	u32 id;
	int base_id_add;

	/* Lookup eth driver */
	drv = lists_uclass_lookup(UCLASS_ETH);
	if (!drv) {
		puts("Cannot find eth driver\n");
		return -ENOENT;
	}

	base_id_add = base_id;

	fdt_for_each_subnode(subnode, blob, node) {
		/* Increment base_id for all subnodes, also the disabled ones */
		base_id++;

		/* Skip disabled ports */
		if (!fdtdec_get_is_enabled(blob, subnode))
			continue;

		plat = calloc(1, sizeof(*plat));
		if (!plat)
			return -ENOMEM;

		id = fdtdec_get_int(blob, subnode, "port-id", -1);
		id += base_id_add;

		name = calloc(1, 16);
		if (!name) {
			free(plat);
			return -ENOMEM;
		}
		sprintf(name, "mvpp2-%d", id);

		/* Create child device UCLASS_ETH and bind it */
		device_bind(parent, &mvpp2_driver, name, plat,
			    offset_to_ofnode(subnode), &dev);
	}

	return 0;
}

static const struct udevice_id mvpp2_ids[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = MVPP22,
	},
	{ }
};

U_BOOT_DRIVER(mvpp2_base) = {
	.name = "mvpp2_base",
	.id = UCLASS_MISC,
	.of_match = mvpp2_ids,
	.bind = mvpp2_base_bind,
	.priv_auto = sizeof(struct mvpp2),
};