cvmx-mgmt-port.c revision 250191
1/***********************license start***************
2 * Copyright (c) 2003-2010  Cavium Inc. (support@cavium.com). All rights
3 * reserved.
4 *
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 *   * Redistributions of source code must retain the above copyright
11 *     notice, this list of conditions and the following disclaimer.
12 *
13 *   * Redistributions in binary form must reproduce the above
14 *     copyright notice, this list of conditions and the following
15 *     disclaimer in the documentation and/or other materials provided
16 *     with the distribution.
17
18 *   * Neither the name of Cavium Inc. nor the names of
19 *     its contributors may be used to endorse or promote products
20 *     derived from this software without specific prior written
21 *     permission.
22
23 * This Software, including technical data, may be subject to U.S. export  control
24 * laws, including the U.S. Export Administration Act and its  associated
25 * regulations, and may be subject to export or import  regulations in other
26 * countries.
27
28 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
29 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
30 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
31 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
32 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
33 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
34 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
35 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
36 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
37 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
38 ***********************license end**************************************/
39
40
41
42
43
44
45
46/**
47 * @file
48 *
49 * Support functions for managing the MII management port
50 *
51 * <hr>$Revision: 70030 $<hr>
52 */
53#include "cvmx.h"
54#include "cvmx-bootmem.h"
55#include "cvmx-spinlock.h"
56#include "cvmx-mdio.h"
57#include "cvmx-mgmt-port.h"
58#include "cvmx-sysinfo.h"
59#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
60#include "cvmx-error.h"
61#endif
62
63/**
64 * Enum of MIX interface modes
65 */
66typedef enum
67{
68    CVMX_MGMT_PORT_NONE = 0,
69    CVMX_MGMT_PORT_MII_MODE,
70    CVMX_MGMT_PORT_RGMII_MODE,
71} cvmx_mgmt_port_mode_t;
72
73/**
74 * Format of the TX/RX ring buffer entries
75 */
76typedef union
77{
78    uint64_t u64;
79    struct
80    {
81        uint64_t    reserved_62_63  : 2;
82        uint64_t    len             : 14;   /* Length of the buffer/packet in bytes */
83        uint64_t    tstamp          : 1;    /* For TX, signals that the packet should be timestamped */
84        uint64_t    code            : 7;    /* The RX error code */
85        uint64_t    addr            : 40;   /* Physical address of the buffer */
86    } s;
87} cvmx_mgmt_port_ring_entry_t;
88
89/**
90 * Per port state required for each mgmt port
91 */
92typedef struct
93{
94    cvmx_spinlock_t             lock;           /* Used for exclusive access to this structure */
95    int                         tx_write_index; /* Where the next TX will write in the tx_ring and tx_buffers */
96    int                         rx_read_index;  /* Where the next RX will be in the rx_ring and rx_buffers */
97    int                         port;           /* Port to use.  (This is the 'fake' IPD port number */
98    uint64_t                    mac;            /* Our MAC address */
99    cvmx_mgmt_port_ring_entry_t tx_ring[CVMX_MGMT_PORT_NUM_TX_BUFFERS];
100    cvmx_mgmt_port_ring_entry_t rx_ring[CVMX_MGMT_PORT_NUM_RX_BUFFERS];
101    char                        tx_buffers[CVMX_MGMT_PORT_NUM_TX_BUFFERS][CVMX_MGMT_PORT_TX_BUFFER_SIZE];
102    char                        rx_buffers[CVMX_MGMT_PORT_NUM_RX_BUFFERS][CVMX_MGMT_PORT_RX_BUFFER_SIZE];
103    cvmx_mgmt_port_mode_t       mode;          /* Mode of the interface */
104} cvmx_mgmt_port_state_t;
105
106/**
107 * Pointers to each mgmt port's state
108 */
109CVMX_SHARED cvmx_mgmt_port_state_t *cvmx_mgmt_port_state_ptr = NULL;
110
111
112/**
113 * Return the number of management ports supported by this chip
114 *
115 * @return Number of ports
116 */
117static int __cvmx_mgmt_port_num_ports(void)
118{
119    if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))
120        return 1;
121    else if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN6XXX))
122        return 2;
123    else
124        return 0;
125}
126
127
128/**
129 * Return the number of management ports supported on this board.
130 *
131 * @return Number of ports
132 */
133int cvmx_mgmt_port_num_ports(void)
134{
135    return __cvmx_mgmt_port_num_ports();
136}
137
138
139/**
140 * Called to initialize a management port for use. Multiple calls
141 * to this function across applications is safe.
142 *
143 * @param port   Port to initialize
144 *
145 * @return CVMX_MGMT_PORT_SUCCESS or an error code
146 */
147cvmx_mgmt_port_result_t cvmx_mgmt_port_initialize(int port)
148{
149    char *alloc_name = "cvmx_mgmt_port";
150    cvmx_mixx_oring1_t oring1;
151    cvmx_mixx_ctl_t mix_ctl;
152
153    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
154        return CVMX_MGMT_PORT_INVALID_PARAM;
155
156    cvmx_mgmt_port_state_ptr = cvmx_bootmem_alloc_named_flags(CVMX_MGMT_PORT_NUM_PORTS * sizeof(cvmx_mgmt_port_state_t), 128, alloc_name, CVMX_BOOTMEM_FLAG_END_ALLOC);
157    if (cvmx_mgmt_port_state_ptr)
158    {
159        memset(cvmx_mgmt_port_state_ptr, 0, CVMX_MGMT_PORT_NUM_PORTS * sizeof(cvmx_mgmt_port_state_t));
160    }
161    else
162    {
163        const cvmx_bootmem_named_block_desc_t *block_desc = cvmx_bootmem_find_named_block(alloc_name);
164        if (block_desc)
165            cvmx_mgmt_port_state_ptr = cvmx_phys_to_ptr(block_desc->base_addr);
166        else
167        {
168            cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: Unable to get named block %s on MIX%d.\n", alloc_name, port);
169            return CVMX_MGMT_PORT_NO_MEMORY;
170        }
171    }
172
173    /* Reset the MIX block if the previous user had a different TX ring size, or if
174    ** we allocated a new (and blank) state structure. */
175    mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
176    if (!mix_ctl.s.reset)
177    {
178        oring1.u64 = cvmx_read_csr(CVMX_MIXX_ORING1(port));
179        if (oring1.s.osize != CVMX_MGMT_PORT_NUM_TX_BUFFERS || cvmx_mgmt_port_state_ptr[port].tx_ring[0].u64 == 0)
180        {
181            mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
182            mix_ctl.s.en = 0;
183            cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
184            do
185            {
186                mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
187            } while (mix_ctl.s.busy);
188            mix_ctl.s.reset = 1;
189            cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
190            cvmx_read_csr(CVMX_MIXX_CTL(port));
191            memset(cvmx_mgmt_port_state_ptr + port, 0, sizeof(cvmx_mgmt_port_state_t));
192        }
193    }
194
195    if (cvmx_mgmt_port_state_ptr[port].tx_ring[0].u64 == 0)
196    {
197        cvmx_mgmt_port_state_t *state = cvmx_mgmt_port_state_ptr + port;
198        int i;
199        cvmx_mixx_bist_t mix_bist;
200        cvmx_agl_gmx_bist_t agl_gmx_bist;
201        cvmx_mixx_oring1_t oring1;
202        cvmx_mixx_iring1_t iring1;
203        cvmx_mixx_ctl_t mix_ctl;
204        cvmx_agl_prtx_ctl_t agl_prtx_ctl;
205
206        /* Make sure BIST passed */
207        mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(port));
208        if (mix_bist.u64)
209            cvmx_dprintf("WARNING: cvmx_mgmt_port_initialize: Managment port MIX failed BIST (0x%016llx) on MIX%d\n", CAST64(mix_bist.u64), port);
210
211        agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
212        if (agl_gmx_bist.u64)
213            cvmx_dprintf("WARNING: cvmx_mgmt_port_initialize: Managment port AGL failed BIST (0x%016llx) on MIX%d\n", CAST64(agl_gmx_bist.u64), port);
214
215        /* Clear all state information */
216        memset(state, 0, sizeof(*state));
217
218        /* Take the control logic out of reset */
219        mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
220        mix_ctl.s.reset = 0;
221        cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
222
223        /* Read until reset == 0.  Timeout should never happen... */
224        if (CVMX_WAIT_FOR_FIELD64(CVMX_MIXX_CTL(port), cvmx_mixx_ctl_t, reset, ==, 0, 300000000))
225        {
226            cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: Timeout waiting for MIX(%d) reset.\n", port);
227            return CVMX_MGMT_PORT_INIT_ERROR;
228        }
229
230        /* Set the PHY address and mode of the interface (RGMII/MII mode). */
231        if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
232        {
233            state->port = -1;
234            state->mode = CVMX_MGMT_PORT_MII_MODE;
235        }
236        else
237        {
238            int port_num = CVMX_HELPER_BOARD_MGMT_IPD_PORT + port;
239            int phy_addr = cvmx_helper_board_get_mii_address(port_num);
240            if (phy_addr != -1)
241            {
242                cvmx_mdio_phy_reg_status_t phy_status;
243                /* Read PHY status register to find the mode of the interface. */
244                phy_status.u16 = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_STATUS);
245                if (phy_status.s.capable_extended_status == 0) // MII mode
246                    state->mode = CVMX_MGMT_PORT_MII_MODE;
247                else if (OCTEON_IS_MODEL(OCTEON_CN6XXX)
248                         && phy_status.s.capable_extended_status) // RGMII mode
249                    state->mode = CVMX_MGMT_PORT_RGMII_MODE;
250                else
251                    state->mode = CVMX_MGMT_PORT_NONE;
252            }
253            else
254            {
255                cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: Not able to read the PHY on MIX%d\n", port);
256                return CVMX_MGMT_PORT_INVALID_PARAM;
257            }
258            state->port = port_num;
259        }
260
261        /* All interfaces should be configured in same mode */
262        for (i = 0; i < __cvmx_mgmt_port_num_ports(); i++)
263        {
264            if (i != port
265                && cvmx_mgmt_port_state_ptr[i].mode != CVMX_MGMT_PORT_NONE
266                && cvmx_mgmt_port_state_ptr[i].mode != state->mode)
267            {
268                cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: All ports in MIX interface are not configured in same mode.\n \
269	Port %d is configured as %d\n \
270	And Port %d is configured as %d\n", port, state->mode, i, cvmx_mgmt_port_state_ptr[i].mode);
271                return CVMX_MGMT_PORT_INVALID_PARAM;
272            }
273        }
274
275        /* Create a default MAC address */
276        state->mac = 0x000000dead000000ull;
277        state->mac += 0xffffff & CAST64(state);
278
279        /* Setup the TX ring */
280        for (i=0; i<CVMX_MGMT_PORT_NUM_TX_BUFFERS; i++)
281        {
282            state->tx_ring[i].s.len = CVMX_MGMT_PORT_TX_BUFFER_SIZE;
283            state->tx_ring[i].s.addr = cvmx_ptr_to_phys(state->tx_buffers[i]);
284        }
285
286        /* Tell the HW where the TX ring is */
287        oring1.u64 = 0;
288        oring1.s.obase = cvmx_ptr_to_phys(state->tx_ring)>>3;
289        oring1.s.osize = CVMX_MGMT_PORT_NUM_TX_BUFFERS;
290        CVMX_SYNCWS;
291        cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);
292
293        /* Setup the RX ring */
294        for (i=0; i<CVMX_MGMT_PORT_NUM_RX_BUFFERS; i++)
295        {
296            /* This size is -8 due to an errata for CN56XX pass 1 */
297            state->rx_ring[i].s.len = CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
298            state->rx_ring[i].s.addr = cvmx_ptr_to_phys(state->rx_buffers[i]);
299        }
300
301        /* Tell the HW where the RX ring is */
302        iring1.u64 = 0;
303        iring1.s.ibase = cvmx_ptr_to_phys(state->rx_ring)>>3;
304        iring1.s.isize = CVMX_MGMT_PORT_NUM_RX_BUFFERS;
305        CVMX_SYNCWS;
306        cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);
307        cvmx_write_csr(CVMX_MIXX_IRING2(port), CVMX_MGMT_PORT_NUM_RX_BUFFERS);
308
309        /* Disable the external input/output */
310        cvmx_mgmt_port_disable(port);
311
312        /* Set the MAC address filtering up */
313        cvmx_mgmt_port_set_mac(port, state->mac);
314
315        /* Set the default max size to an MTU of 1500 with L2 and VLAN */
316        cvmx_mgmt_port_set_max_packet_size(port, 1518);
317
318        /* Enable the port HW. Packets are not allowed until cvmx_mgmt_port_enable() is called */
319        mix_ctl.u64 = 0;
320        mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
321        mix_ctl.s.en = 1;           /* Enable the port */
322        mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
323        mix_ctl.s.mrq_hwm = 1;      /* MII CB-request FIFO programmable high watermark */
324        cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
325
326        /* Select the mode of operation for the interface. */
327        if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
328        {
329            agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
330
331            if (state->mode == CVMX_MGMT_PORT_RGMII_MODE)
332                agl_prtx_ctl.s.mode = 0;
333            else if (state->mode == CVMX_MGMT_PORT_MII_MODE)
334                agl_prtx_ctl.s.mode = 1;
335            else
336            {
337                cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: Invalid mode for MIX(%d)\n", port);
338                return CVMX_MGMT_PORT_INVALID_PARAM;
339            }
340
341            cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
342	}
343
344        /* Initialize the physical layer. */
345        if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
346        {
347            /* MII clocks counts are based on the 125Mhz reference, so our
348                delays need to be scaled to match the core clock rate. The
349                "+1" is to make sure rounding always waits a little too
350                long. */
351            uint64_t clock_scale = cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 125000000 + 1;
352
353            /* Take the DLL and clock tree out of reset */
354            agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
355            agl_prtx_ctl.s.clkrst = 0;
356            if (state->mode == CVMX_MGMT_PORT_RGMII_MODE) // RGMII Initialization
357            {
358                agl_prtx_ctl.s.dllrst = 0;
359                agl_prtx_ctl.s.clktx_byp = 0;
360            }
361            cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
362            cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));  /* Force write out before wait */
363
364            /* Wait for the DLL to lock.  External 125 MHz reference clock must be stable at this point. */
365            cvmx_wait(256 * clock_scale);
366
367            /* The rest of the config is common between RGMII/MII */
368
369            /* Enable the interface */
370            agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
371            agl_prtx_ctl.s.enable = 1;
372            cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
373
374            /* Read the value back to force the previous write */
375            agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
376
377            /* Enable the componsation controller */
378            agl_prtx_ctl.s.comp = 1;
379            agl_prtx_ctl.s.drv_byp = 0;
380            cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
381            cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));  /* Force write out before wait */
382            cvmx_wait(1024 * clock_scale); // for componsation state to lock.
383        }
384        else if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
385        {
386            /* Force compensation values, as they are not determined properly by HW */
387            cvmx_agl_gmx_drv_ctl_t drv_ctl;
388
389            drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
390            if (port)
391            {
392                drv_ctl.s.byp_en1 = 1;
393                drv_ctl.s.nctl1 = 6;
394                drv_ctl.s.pctl1 = 6;
395            }
396            else
397            {
398                drv_ctl.s.byp_en = 1;
399                drv_ctl.s.nctl = 6;
400                drv_ctl.s.pctl = 6;
401            }
402            cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
403        }
404    }
405#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
406    cvmx_error_enable_group(CVMX_ERROR_GROUP_MGMT_PORT, port);
407#endif
408    return CVMX_MGMT_PORT_SUCCESS;
409}
410
411
412/**
413 * Shutdown a management port. This currently disables packet IO
414 * but leaves all hardware and buffers. Another application can then
415 * call initialize() without redoing the hardware setup.
416 *
417 * @param port   Management port
418 *
419 * @return CVMX_MGMT_PORT_SUCCESS or an error code
420 */
421cvmx_mgmt_port_result_t cvmx_mgmt_port_shutdown(int port)
422{
423    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
424        return CVMX_MGMT_PORT_INVALID_PARAM;
425
426#if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
427    cvmx_error_disable_group(CVMX_ERROR_GROUP_MGMT_PORT, port);
428#endif
429
430    /* Stop packets from comming in */
431    cvmx_mgmt_port_disable(port);
432
433    /* We don't free any memory so the next intialize can reuse the HW setup */
434    return CVMX_MGMT_PORT_SUCCESS;
435}
436
437
438/**
439 * Enable packet IO on a management port
440 *
441 * @param port   Management port
442 *
443 * @return CVMX_MGMT_PORT_SUCCESS or an error code
444 */
445cvmx_mgmt_port_result_t cvmx_mgmt_port_enable(int port)
446{
447    cvmx_mgmt_port_state_t *state;
448    cvmx_agl_gmx_inf_mode_t agl_gmx_inf_mode;
449    cvmx_agl_gmx_rxx_frm_ctl_t rxx_frm_ctl;
450
451    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
452        return CVMX_MGMT_PORT_INVALID_PARAM;
453
454    state = cvmx_mgmt_port_state_ptr + port;
455
456    cvmx_spinlock_lock(&state->lock);
457
458    rxx_frm_ctl.u64 = 0;
459    rxx_frm_ctl.s.pre_align = 1;
460    rxx_frm_ctl.s.pad_len = 1;  /* When set, disables the length check for non-min sized pkts with padding in the client data */
461    rxx_frm_ctl.s.vlan_len = 1; /* When set, disables the length check for VLAN pkts */
462    rxx_frm_ctl.s.pre_free = 1; /* When set, PREAMBLE checking is  less strict */
463    rxx_frm_ctl.s.ctl_smac = 0; /* Control Pause Frames can match station SMAC */
464    rxx_frm_ctl.s.ctl_mcst = 1; /* Control Pause Frames can match globally assign Multicast address */
465    rxx_frm_ctl.s.ctl_bck = 1;  /* Forward pause information to TX block */
466    rxx_frm_ctl.s.ctl_drp = 1;  /* Drop Control Pause Frames */
467    rxx_frm_ctl.s.pre_strp = 1; /* Strip off the preamble */
468    rxx_frm_ctl.s.pre_chk = 1;  /* This port is configured to send PREAMBLE+SFD to begin every frame.  GMX checks that the PREAMBLE is sent correctly */
469    cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);
470
471    /* Enable the AGL block */
472    if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
473    {
474        agl_gmx_inf_mode.u64 = 0;
475        agl_gmx_inf_mode.s.en = 1;
476        cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
477    }
478
479    /* Configure the port duplex and enables */
480    cvmx_mgmt_port_link_set(port, cvmx_mgmt_port_link_get(port));
481
482    cvmx_spinlock_unlock(&state->lock);
483    return CVMX_MGMT_PORT_SUCCESS;
484}
485
486
487/**
488 * Disable packet IO on a management port
489 *
490 * @param port   Management port
491 *
492 * @return CVMX_MGMT_PORT_SUCCESS or an error code
493 */
494cvmx_mgmt_port_result_t cvmx_mgmt_port_disable(int port)
495{
496    cvmx_mgmt_port_state_t *state;
497    cvmx_agl_gmx_prtx_cfg_t agl_gmx_prtx;
498
499    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
500        return CVMX_MGMT_PORT_INVALID_PARAM;
501
502    state = cvmx_mgmt_port_state_ptr + port;
503
504    cvmx_spinlock_lock(&state->lock);
505
506    agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
507    agl_gmx_prtx.s.en = 0;
508    cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
509
510    cvmx_spinlock_unlock(&state->lock);
511    return CVMX_MGMT_PORT_SUCCESS;
512}
513
514
515/**
516 * Send a packet out the management port. The packet is copied so
517 * the input buffer isn't used after this call.
518 *
519 * @param port       Management port
520 * @param packet_len Length of the packet to send. It does not include the final CRC
521 * @param buffer     Packet data
522 *
523 * @return CVMX_MGMT_PORT_SUCCESS or an error code
524 */
525cvmx_mgmt_port_result_t cvmx_mgmt_port_send(int port, int packet_len, void *buffer)
526{
527    cvmx_mgmt_port_state_t *state;
528    cvmx_mixx_oring2_t mix_oring2;
529
530    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
531        return CVMX_MGMT_PORT_INVALID_PARAM;
532
533    /* Max sure the packet size is valid */
534    if ((packet_len < 1) || (packet_len > CVMX_MGMT_PORT_TX_BUFFER_SIZE))
535        return CVMX_MGMT_PORT_INVALID_PARAM;
536
537    if (buffer == NULL)
538        return CVMX_MGMT_PORT_INVALID_PARAM;
539
540    state = cvmx_mgmt_port_state_ptr + port;
541
542    cvmx_spinlock_lock(&state->lock);
543
544    mix_oring2.u64 = cvmx_read_csr(CVMX_MIXX_ORING2(port));
545    if (mix_oring2.s.odbell >= CVMX_MGMT_PORT_NUM_TX_BUFFERS - 1)
546    {
547        /* No room for another packet */
548        cvmx_spinlock_unlock(&state->lock);
549        return CVMX_MGMT_PORT_NO_MEMORY;
550    }
551    else
552    {
553        /* Copy the packet into the output buffer */
554        memcpy(state->tx_buffers[state->tx_write_index], buffer, packet_len);
555        /* Insert the source MAC */
556        memcpy(state->tx_buffers[state->tx_write_index] + 6, ((char*)&state->mac) + 2, 6);
557        /* Update the TX ring buffer entry size */
558        state->tx_ring[state->tx_write_index].s.len = packet_len;
559        /* This code doesn't support TX timestamps */
560        state->tx_ring[state->tx_write_index].s.tstamp = 0;
561        /* Increment our TX index */
562        state->tx_write_index = (state->tx_write_index + 1) % CVMX_MGMT_PORT_NUM_TX_BUFFERS;
563        /* Ring the doorbell, sending the packet */
564        CVMX_SYNCWS;
565        cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
566        if (cvmx_read_csr(CVMX_MIXX_ORCNT(port)))
567            cvmx_write_csr(CVMX_MIXX_ORCNT(port), cvmx_read_csr(CVMX_MIXX_ORCNT(port)));
568
569        cvmx_spinlock_unlock(&state->lock);
570        return CVMX_MGMT_PORT_SUCCESS;
571    }
572}
573
574
#if defined(__FreeBSD__)
/**
 * Send a packet out the management port. The packet is copied so
 * the input mbuf isn't used after this call.
 *
 * NOTE(review): unlike cvmx_mgmt_port_send(), this variant does not
 * rewrite the frame's source MAC; the mbuf contents are sent as-is.
 *
 * @param port       Management port
 * @param m          Packet mbuf (with pkthdr)
 *
 * @return CVMX_MGMT_PORT_SUCCESS or an error code
 */
cvmx_mgmt_port_result_t cvmx_mgmt_port_sendm(int port, const struct mbuf *m)
{
    cvmx_mgmt_port_state_t *state;
    cvmx_mixx_oring2_t mix_oring2;

    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    /* Make sure the packet size is valid */
    if ((m->m_pkthdr.len < 1) || (m->m_pkthdr.len > CVMX_MGMT_PORT_TX_BUFFER_SIZE))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    state = cvmx_mgmt_port_state_ptr + port;

    cvmx_spinlock_lock(&state->lock);

    /* The outstanding doorbell count reflects how many ring slots are in use */
    mix_oring2.u64 = cvmx_read_csr(CVMX_MIXX_ORING2(port));
    if (mix_oring2.s.odbell >= CVMX_MGMT_PORT_NUM_TX_BUFFERS - 1)
    {
        /* No room for another packet */
        cvmx_spinlock_unlock(&state->lock);
        return CVMX_MGMT_PORT_NO_MEMORY;
    }
    else
    {
        /* Copy the packet into the output buffer */
	m_copydata(m, 0, m->m_pkthdr.len, state->tx_buffers[state->tx_write_index]);
        /* Update the TX ring buffer entry size */
        state->tx_ring[state->tx_write_index].s.len = m->m_pkthdr.len;
        /* This code doesn't support TX timestamps */
        state->tx_ring[state->tx_write_index].s.tstamp = 0;
        /* Increment our TX index */
        state->tx_write_index = (state->tx_write_index + 1) % CVMX_MGMT_PORT_NUM_TX_BUFFERS;
        /* Ring the doorbell, sending the packet */
        CVMX_SYNCWS;
        cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
        /* Acknowledge any transmits the hardware has already completed */
        if (cvmx_read_csr(CVMX_MIXX_ORCNT(port)))
            cvmx_write_csr(CVMX_MIXX_ORCNT(port), cvmx_read_csr(CVMX_MIXX_ORCNT(port)));

        cvmx_spinlock_unlock(&state->lock);
        return CVMX_MGMT_PORT_SUCCESS;
    }
}
#endif
629
630
631/**
632 * Receive a packet from the management port.
633 *
634 * @param port       Management port
635 * @param buffer_len Size of the buffer to receive the packet into
636 * @param buffer     Buffer to receive the packet into
637 *
638 * @return The size of the packet, or a negative erorr code on failure. Zero
639 *         means that no packets were available.
640 */
641int cvmx_mgmt_port_receive(int port, int buffer_len, uint8_t *buffer)
642{
643    cvmx_mixx_ircnt_t mix_ircnt;
644    cvmx_mgmt_port_state_t *state;
645    int result;
646
647    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
648        return CVMX_MGMT_PORT_INVALID_PARAM;
649
650    /* Max sure the buffer size is valid */
651    if (buffer_len < 1)
652        return CVMX_MGMT_PORT_INVALID_PARAM;
653
654    if (buffer == NULL)
655        return CVMX_MGMT_PORT_INVALID_PARAM;
656
657    state = cvmx_mgmt_port_state_ptr + port;
658
659    cvmx_spinlock_lock(&state->lock);
660
661    /* Find out how many RX packets are pending */
662    mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
663    if (mix_ircnt.s.ircnt)
664    {
665        uint64_t *source = (void *)state->rx_buffers[state->rx_read_index];
666	uint64_t *zero_check = source;
667        /* CN56XX pass 1 has an errata where packets might start 8 bytes
668            into the buffer instead of at their correct lcoation. If the
669            first 8 bytes is zero we assume this has happened */
670        if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && (*zero_check == 0))
671            source++;
672        /* Start off with zero bytes received */
673        result = 0;
674        /* While the completion code signals more data, copy the buffers
675            into the user's data */
676        while (state->rx_ring[state->rx_read_index].s.code == 16)
677        {
678            /* Only copy what will fit in the user's buffer */
679            int length = state->rx_ring[state->rx_read_index].s.len;
680            if (length > buffer_len)
681                length = buffer_len;
682            memcpy(buffer, source, length);
683            /* Reduce the size of the buffer to the remaining space. If we run
684                out we will signal an error when the code 15 buffer doesn't fit */
685            buffer += length;
686            buffer_len -= length;
687            result += length;
688            /* Update this buffer for reuse in future receives. This size is
689                -8 due to an errata for CN56XX pass 1 */
690            state->rx_ring[state->rx_read_index].s.code = 0;
691            state->rx_ring[state->rx_read_index].s.len = CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
692            state->rx_read_index = (state->rx_read_index + 1) % CVMX_MGMT_PORT_NUM_RX_BUFFERS;
693            /* Zero the beginning of the buffer for use by the errata check */
694            *zero_check = 0;
695            CVMX_SYNCWS;
696            /* Increment the number of RX buffers */
697            cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
698            source = (void *)state->rx_buffers[state->rx_read_index];
699            zero_check = source;
700        }
701
702        /* Check for the final good completion code */
703        if (state->rx_ring[state->rx_read_index].s.code == 15)
704        {
705            if (buffer_len >= state->rx_ring[state->rx_read_index].s.len)
706            {
707                int length = state->rx_ring[state->rx_read_index].s.len;
708                memcpy(buffer, source, length);
709                result += length;
710            }
711            else
712            {
713                /* Not enough room for the packet */
714                cvmx_dprintf("ERROR: cvmx_mgmt_port_receive: Packet (%d) larger than supplied buffer (%d)\n", state->rx_ring[state->rx_read_index].s.len, buffer_len);
715                result = CVMX_MGMT_PORT_NO_MEMORY;
716            }
717        }
718        else
719        {
720            cvmx_dprintf("ERROR: cvmx_mgmt_port_receive: Receive error code %d. Packet dropped(Len %d), \n",
721                         state->rx_ring[state->rx_read_index].s.code, state->rx_ring[state->rx_read_index].s.len + result);
722            result = -state->rx_ring[state->rx_read_index].s.code;
723
724
725            /* Check to see if we need to change the duplex. */
726            cvmx_mgmt_port_link_set(port, cvmx_mgmt_port_link_get(port));
727        }
728
729        /* Clean out the ring buffer entry. This size is -8 due to an errata
730            for CN56XX pass 1 */
731        state->rx_ring[state->rx_read_index].s.code = 0;
732        state->rx_ring[state->rx_read_index].s.len = CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
733        state->rx_read_index = (state->rx_read_index + 1) % CVMX_MGMT_PORT_NUM_RX_BUFFERS;
734        /* Zero the beginning of the buffer for use by the errata check */
735        *zero_check = 0;
736        CVMX_SYNCWS;
737        /* Increment the number of RX buffers */
738        cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
739        /* Decrement the pending RX count */
740        cvmx_write_csr(CVMX_MIXX_IRCNT(port), 1);
741    }
742    else
743    {
744        /* No packets available */
745        result = 0;
746    }
747    cvmx_spinlock_unlock(&state->lock);
748    return result;
749}
750
751/**
752 * Set the MAC address for a management port
753 *
754 * @param port   Management port
755 * @param mac    New MAC address. The lower 6 bytes are used.
756 *
757 * @return CVMX_MGMT_PORT_SUCCESS or an error code
758 */
759cvmx_mgmt_port_result_t cvmx_mgmt_port_set_mac(int port, uint64_t mac)
760{
761    cvmx_mgmt_port_state_t *state;
762    cvmx_agl_gmx_rxx_adr_ctl_t agl_gmx_rxx_adr_ctl;
763
764    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
765        return CVMX_MGMT_PORT_INVALID_PARAM;
766
767    state = cvmx_mgmt_port_state_ptr + port;
768
769    cvmx_spinlock_lock(&state->lock);
770
771    agl_gmx_rxx_adr_ctl.u64 = 0;
772    agl_gmx_rxx_adr_ctl.s.cam_mode = 1; /* Only accept matching MAC addresses */
773    agl_gmx_rxx_adr_ctl.s.mcst = 0;     /* Drop multicast */
774    agl_gmx_rxx_adr_ctl.s.bcst = 1;     /* Allow broadcast */
775    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), agl_gmx_rxx_adr_ctl.u64);
776
777    /* Only using one of the CAMs */
778    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), (mac >> 40) & 0xff);
779    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), (mac >> 32) & 0xff);
780    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), (mac >> 24) & 0xff);
781    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), (mac >> 16) & 0xff);
782    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), (mac >> 8) & 0xff);
783    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), (mac >> 0) & 0xff);
784    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 1);
785    state->mac = mac;
786
787    cvmx_spinlock_unlock(&state->lock);
788    return CVMX_MGMT_PORT_SUCCESS;
789}
790
791
792/**
793 * Get the MAC address for a management port
794 *
795 * @param port   Management port
796 *
797 * @return MAC address
798 */
799uint64_t cvmx_mgmt_port_get_mac(int port)
800{
801    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
802        return CVMX_MGMT_PORT_INVALID_PARAM;
803
804    return cvmx_mgmt_port_state_ptr[port].mac;
805}
806
807/**
808 * Set the multicast list.
809 *
810 * @param port   Management port
811 * @param flags  Interface flags
812 *
813 * @return
814 */
815void cvmx_mgmt_port_set_multicast_list(int port, int flags)
816{
817    cvmx_mgmt_port_state_t *state;
818    cvmx_agl_gmx_rxx_adr_ctl_t agl_gmx_rxx_adr_ctl;
819
820    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
821        return;
822
823    state = cvmx_mgmt_port_state_ptr + port;
824
825    cvmx_spinlock_lock(&state->lock);
826
827    agl_gmx_rxx_adr_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port));
828
829    /* Allow broadcast MAC addresses */
830    if (!agl_gmx_rxx_adr_ctl.s.bcst)
831	agl_gmx_rxx_adr_ctl.s.bcst = 1;
832
833    if ((flags & CVMX_IFF_ALLMULTI) || (flags & CVMX_IFF_PROMISC))
834	agl_gmx_rxx_adr_ctl.s.mcst = 2; /* Force accept multicast packets */
835    else
836	agl_gmx_rxx_adr_ctl.s.mcst = 1; /* Force reject multicast packets */
837
838    if (flags & CVMX_IFF_PROMISC)
839	agl_gmx_rxx_adr_ctl.s.cam_mode = 0; /* Reject matches if promisc. Since CAM is shut off, should accept everything */
840    else
841	agl_gmx_rxx_adr_ctl.s.cam_mode = 1; /* Filter packets based on the CAM */
842
843    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), agl_gmx_rxx_adr_ctl.u64);
844
845    if (flags & CVMX_IFF_PROMISC)
846	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 0);
847    else
848	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 1);
849
850    cvmx_spinlock_unlock(&state->lock);
851}
852
853
854/**
855 * Set the maximum packet allowed in. Size is specified
856 * including L2 but without FCS. A normal MTU would corespond
857 * to 1514 assuming the standard 14 byte L2 header.
858 *
859 * @param port   Management port
860 * @param size_without_fcs
861 *               Size in bytes without FCS
862 */
863void cvmx_mgmt_port_set_max_packet_size(int port, int size_without_fcs)
864{
865    cvmx_mgmt_port_state_t *state;
866
867    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
868        return;
869
870    state = cvmx_mgmt_port_state_ptr + port;
871
872    cvmx_spinlock_lock(&state->lock);
873    cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
874    cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port), (size_without_fcs+7) & 0xfff8);
875    cvmx_spinlock_unlock(&state->lock);
876}
877
878/**
879 * Return the link state of an RGMII/MII port as returned by
880 * auto negotiation. The result of this function may not match
881 * Octeon's link config if auto negotiation has changed since
882 * the last call to cvmx_mgmt_port_link_set().
883 *
884 * @param port     The RGMII/MII interface port to query
885 *
886 * @return Link state
887 */
888cvmx_helper_link_info_t cvmx_mgmt_port_link_get(int port)
889{
890    cvmx_mgmt_port_state_t *state;
891    cvmx_helper_link_info_t result;
892
893    state = cvmx_mgmt_port_state_ptr + port;
894    result.u64 = 0;
895
896    if (port > __cvmx_mgmt_port_num_ports())
897    {
898        cvmx_dprintf("WARNING: Invalid port %d\n", port);
899        return result;
900    }
901
902    if (state->port != -1)
903        return __cvmx_helper_board_link_get(state->port);
904    else // Simulator does not have PHY, use some defaults.
905    {
906        result.s.full_duplex = 1;
907        result.s.link_up = 1;
908        result.s.speed = 100;
909        return result;
910    }
911    return result;
912}
913
914/**
915 * Configure RGMII/MII port for the specified link state. This
916 * function does not influence auto negotiation at the PHY level.
917 *
918 * @param port      RGMII/MII interface port
919 * @param link_info The new link state
920 *
921 * @return Zero on success, negative on failure
922 */
923int cvmx_mgmt_port_link_set(int port, cvmx_helper_link_info_t link_info)
924{
925    cvmx_agl_gmx_prtx_cfg_t agl_gmx_prtx;
926
927    /* Disable GMX before we make any changes. */
928    agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
929    agl_gmx_prtx.s.en = 0;
930    agl_gmx_prtx.s.tx_en = 0;
931    agl_gmx_prtx.s.rx_en = 0;
932    cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
933
934    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
935    {
936        uint64_t one_second = cvmx_clock_get_rate(CVMX_CLOCK_CORE);
937        /* Wait for GMX to be idle */
938        if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port), cvmx_agl_gmx_prtx_cfg_t, rx_idle, ==, 1, one_second)
939            || CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port), cvmx_agl_gmx_prtx_cfg_t, tx_idle, ==, 1, one_second))
940        {
941            cvmx_dprintf("MIX%d: Timeout waiting for GMX to be idle\n", port);
942            return -1;
943        }
944    }
945
946    agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
947
948    /* Set duplex mode */
949    if (!link_info.s.link_up)
950        agl_gmx_prtx.s.duplex = 1;   /* Force full duplex on down links */
951    else
952        agl_gmx_prtx.s.duplex = link_info.s.full_duplex;
953
954   switch(link_info.s.speed)
955    {
956        case 10:
957            agl_gmx_prtx.s.speed = 0;
958            agl_gmx_prtx.s.slottime = 0;
959            if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
960            {
961                agl_gmx_prtx.s.speed_msb = 1;
962                agl_gmx_prtx.s.burst = 1;
963            }
964         break;
965
966        case 100:
967            agl_gmx_prtx.s.speed = 0;
968            agl_gmx_prtx.s.slottime = 0;
969            if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
970            {
971                agl_gmx_prtx.s.speed_msb = 0;
972                agl_gmx_prtx.s.burst = 1;
973            }
974            break;
975
976        case 1000:
977            /* 1000 MBits is only supported on 6XXX chips */
978            if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
979            {
980                agl_gmx_prtx.s.speed_msb = 0;
981                agl_gmx_prtx.s.speed = 1;
982                agl_gmx_prtx.s.slottime = 1;  /* Only matters for half-duplex */
983                agl_gmx_prtx.s.burst = agl_gmx_prtx.s.duplex;
984            }
985            break;
986
987        /* No link */
988        case 0:
989        default:
990            break;
991    }
992
993    /* Write the new GMX setting with the port still disabled. */
994    cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
995
996    /* Read GMX CFG again to make sure the config is completed. */
997    agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
998
999
1000    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
1001    {
1002        cvmx_mgmt_port_state_t *state = cvmx_mgmt_port_state_ptr + port;
1003        cvmx_agl_gmx_txx_clk_t agl_clk;
1004        agl_clk.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_CLK(port));
1005        agl_clk.s.clk_cnt = 1;    /* MII (both speeds) and RGMII 1000 setting */
1006        if (state->mode == CVMX_MGMT_PORT_RGMII_MODE)
1007        {
1008            if (link_info.s.speed == 10)
1009                agl_clk.s.clk_cnt = 50;
1010            else if (link_info.s.speed == 100)
1011                agl_clk.s.clk_cnt = 5;
1012        }
1013        cvmx_write_csr(CVMX_AGL_GMX_TXX_CLK(port), agl_clk.u64);
1014    }
1015
1016    /* Enable transmit and receive ports */
1017    agl_gmx_prtx.s.tx_en = 1;
1018    agl_gmx_prtx.s.rx_en = 1;
1019    cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
1020
1021    /* Enable the link. */
1022    agl_gmx_prtx.s.en = 1;
1023    cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
1024    return 0;
1025}
1026