/***********************license start***************
 * Copyright (c) 2003-2010  Cavium Networks (support@cavium.com). All rights
 * reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *
 *   * Neither the name of Cavium Networks nor the names of
 *     its contributors may be used to endorse or promote products
 *     derived from this software without specific prior written
 *     permission.
 *
 * This Software, including technical data, may be subject to U.S. export  control
 * laws, including the U.S. Export Administration Act and its  associated
 * regulations, and may be subject to export or import  regulations in other
 * countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM  NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
 * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
 * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE  RISK ARISING OUT OF USE OR
 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
 ***********************license end**************************************/



/**
 * @file
 *
 * Support functions for managing the MII management port
 *
 * <hr>$Revision: 49628 $<hr>
 */
#include "cvmx.h"
#include "cvmx-bootmem.h"
#include "cvmx-spinlock.h"
#include "cvmx-mdio.h"
#include "cvmx-mgmt-port.h"
#include "cvmx-sysinfo.h"
#include "cvmx-error.h"

/**
 * Enum of MIX interface modes
 */
typedef enum
{
    CVMX_MGMT_PORT_NONE = 0,
    CVMX_MGMT_PORT_MII_MODE,
    CVMX_MGMT_PORT_RGMII_MODE,
} cvmx_mgmt_port_mode_t;

/**
 * Format of the TX/RX ring buffer entries
 */
typedef union
{
    uint64_t u64;
    struct
    {
        uint64_t    reserved_62_63  : 2;
        uint64_t    len             : 14;   /* Length of the buffer/packet in bytes */
        uint64_t    tstamp          : 1;    /* For TX, signals that the packet should be timestamped */
        uint64_t    code            : 7;    /* The RX error code */
        uint64_t    addr            : 40;   /* Physical address of the buffer */
    } s;
} cvmx_mgmt_port_ring_entry_t;
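
/* Illustrative sketch (not part of the original SDK source): how a single TX
   ring entry is typically filled in before the doorbell is rung, mirroring
   the ring setup done later in this file. 'packet_len' and 'tx_buffer' are
   hypothetical placeholders.

       cvmx_mgmt_port_ring_entry_t entry;
       entry.u64 = 0;
       entry.s.len = packet_len;                   // bytes to send, CRC excluded
       entry.s.tstamp = 0;                         // no TX timestamp requested
       entry.s.addr = cvmx_ptr_to_phys(tx_buffer); // physical address of the data
*/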

/**
 * Per port state required for each mgmt port
 */
typedef struct
{
    cvmx_spinlock_t             lock;           /* Used for exclusive access to this structure */
    int                         tx_write_index; /* Where the next TX will write in the tx_ring and tx_buffers */
    int                         rx_read_index;  /* Where the next RX will be in the rx_ring and rx_buffers */
    int                         port;           /* Port to use. (This is the 'fake' IPD port number) */
    uint64_t                    mac;            /* Our MAC address */
    cvmx_mgmt_port_ring_entry_t tx_ring[CVMX_MGMT_PORT_NUM_TX_BUFFERS];
    cvmx_mgmt_port_ring_entry_t rx_ring[CVMX_MGMT_PORT_NUM_RX_BUFFERS];
    char                        tx_buffers[CVMX_MGMT_PORT_NUM_TX_BUFFERS][CVMX_MGMT_PORT_TX_BUFFER_SIZE];
    char                        rx_buffers[CVMX_MGMT_PORT_NUM_RX_BUFFERS][CVMX_MGMT_PORT_RX_BUFFER_SIZE];
    cvmx_mgmt_port_mode_t       mode;          /* Mode of the interface */
} cvmx_mgmt_port_state_t;

/**
 * Pointers to each mgmt port's state
 */
CVMX_SHARED cvmx_mgmt_port_state_t *cvmx_mgmt_port_state_ptr = NULL;


/**
 * Return the number of management ports supported by this chip
 *
 * @return Number of ports
 */
static int __cvmx_mgmt_port_num_ports(void)
{
    if (OCTEON_IS_MODEL(OCTEON_CN56XX))
        return 1;
    else if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
        return 2;
    else
        return 0;
}


/**
 * Called to initialize a management port for use. Multiple calls
 * to this function across applications are safe.
 *
 * @param port   Port to initialize
 *
 * @return CVMX_MGMT_PORT_SUCCESS or an error code
 */
cvmx_mgmt_port_result_t cvmx_mgmt_port_initialize(int port)
{
    char *alloc_name = "cvmx_mgmt_port";
    cvmx_mixx_oring1_t oring1;
    cvmx_mixx_ctl_t mix_ctl;

    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    cvmx_mgmt_port_state_ptr = cvmx_bootmem_alloc_named(CVMX_MGMT_PORT_NUM_PORTS * sizeof(cvmx_mgmt_port_state_t), 128, alloc_name);
    if (cvmx_mgmt_port_state_ptr)
    {
        memset(cvmx_mgmt_port_state_ptr, 0, CVMX_MGMT_PORT_NUM_PORTS * sizeof(cvmx_mgmt_port_state_t));
    }
    else
    {
        const cvmx_bootmem_named_block_desc_t *block_desc = cvmx_bootmem_find_named_block(alloc_name);
        if (block_desc)
            cvmx_mgmt_port_state_ptr = cvmx_phys_to_ptr(block_desc->base_addr);
        else
        {
            cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: Unable to get named block %s on MIX%d.\n", alloc_name, port);
            return CVMX_MGMT_PORT_NO_MEMORY;
        }
    }

    /* Reset the MIX block if the previous user had a different TX ring size, or if
    ** we allocated a new (and blank) state structure. */
    mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
    if (!mix_ctl.s.reset)
    {
        oring1.u64 = cvmx_read_csr(CVMX_MIXX_ORING1(port));
        if (oring1.s.osize != CVMX_MGMT_PORT_NUM_TX_BUFFERS || cvmx_mgmt_port_state_ptr[port].tx_ring[0].u64 == 0)
        {
            mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
            mix_ctl.s.en = 0;
            cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
            do
            {
                mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
            } while (mix_ctl.s.busy);
            mix_ctl.s.reset = 1;
            cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
            cvmx_read_csr(CVMX_MIXX_CTL(port));
            memset(cvmx_mgmt_port_state_ptr + port, 0, sizeof(cvmx_mgmt_port_state_t));
        }
    }

    if (cvmx_mgmt_port_state_ptr[port].tx_ring[0].u64 == 0)
    {
        cvmx_mgmt_port_state_t *state = cvmx_mgmt_port_state_ptr + port;
        int i;
        cvmx_mixx_bist_t mix_bist;
        cvmx_agl_gmx_bist_t agl_gmx_bist;
        cvmx_mixx_oring1_t oring1;
        cvmx_mixx_iring1_t iring1;
        cvmx_mixx_ctl_t mix_ctl;
        cvmx_agl_prtx_ctl_t agl_prtx_ctl;

        /* Make sure BIST passed */
        mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(port));
        if (mix_bist.u64)
            cvmx_dprintf("WARNING: cvmx_mgmt_port_initialize: Management port MIX failed BIST (0x%016llx) on MIX%d\n", CAST64(mix_bist.u64), port);

        agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
        if (agl_gmx_bist.u64)
            cvmx_dprintf("WARNING: cvmx_mgmt_port_initialize: Management port AGL failed BIST (0x%016llx) on MIX%d\n", CAST64(agl_gmx_bist.u64), port);

        /* Clear all state information */
        memset(state, 0, sizeof(*state));

        /* Take the control logic out of reset */
        mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
        mix_ctl.s.reset = 0;
        cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);

        /* Read until reset == 0.  Timeout should never happen... */
        if (CVMX_WAIT_FOR_FIELD64(CVMX_MIXX_CTL(port), cvmx_mixx_ctl_t, reset, ==, 0, 300000000))
        {
            cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: Timeout waiting for MIX(%d) reset.\n", port);
            return CVMX_MGMT_PORT_INIT_ERROR;
        }

        /* Set the PHY address and mode of the interface (RGMII/MII mode). */
        if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
        {
            state->port = -1;
            state->mode = CVMX_MGMT_PORT_MII_MODE;
        }
        else
        {
            int port_num = CVMX_HELPER_BOARD_MGMT_IPD_PORT + port;
            int phy_addr = cvmx_helper_board_get_mii_address(port_num);
            if (phy_addr != -1)
            {
                cvmx_mdio_phy_reg_status_t phy_status;
                /* Read PHY status register to find the mode of the interface. */
                phy_status.u16 = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, CVMX_MDIO_PHY_REG_STATUS);
                if (phy_status.s.capable_extended_status == 0) // MII mode
                    state->mode = CVMX_MGMT_PORT_MII_MODE;
                else if (OCTEON_IS_MODEL(OCTEON_CN6XXX)
                         && phy_status.s.capable_extended_status) // RGMII mode
                    state->mode = CVMX_MGMT_PORT_RGMII_MODE;
                else
                    state->mode = CVMX_MGMT_PORT_NONE;
            }
            else
            {
                cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: Not able to read the PHY on MIX%d\n", port);
                return CVMX_MGMT_PORT_INVALID_PARAM;
            }
            state->port = port_num;
        }

        /* All interfaces must be configured in the same mode */
        for (i = 0; i < __cvmx_mgmt_port_num_ports(); i++)
        {
            if (i != port
                && cvmx_mgmt_port_state_ptr[i].mode != CVMX_MGMT_PORT_NONE
                && cvmx_mgmt_port_state_ptr[i].mode != state->mode)
            {
                cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: All ports on the MIX interface must be configured in the same mode.\n"
                             "    Port %d is configured as %d\n"
                             "    Port %d is configured as %d\n", port, state->mode, i, cvmx_mgmt_port_state_ptr[i].mode);
                return CVMX_MGMT_PORT_INVALID_PARAM;
            }
        }

        /* Create a default MAC address */
        state->mac = 0x000000dead000000ull;
        state->mac += 0xffffff & CAST64(state);

        /* Setup the TX ring */
        for (i=0; i<CVMX_MGMT_PORT_NUM_TX_BUFFERS; i++)
        {
            state->tx_ring[i].s.len = CVMX_MGMT_PORT_TX_BUFFER_SIZE;
            state->tx_ring[i].s.addr = cvmx_ptr_to_phys(state->tx_buffers[i]);
        }

        /* Tell the HW where the TX ring is */
        oring1.u64 = 0;
        oring1.s.obase = cvmx_ptr_to_phys(state->tx_ring)>>3;
        oring1.s.osize = CVMX_MGMT_PORT_NUM_TX_BUFFERS;
        CVMX_SYNCWS;
        cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);

        /* Setup the RX ring */
        for (i=0; i<CVMX_MGMT_PORT_NUM_RX_BUFFERS; i++)
        {
            /* This size is -8 due to an erratum for CN56XX pass 1 */
            state->rx_ring[i].s.len = CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
            state->rx_ring[i].s.addr = cvmx_ptr_to_phys(state->rx_buffers[i]);
        }

        /* Tell the HW where the RX ring is */
        iring1.u64 = 0;
        iring1.s.ibase = cvmx_ptr_to_phys(state->rx_ring)>>3;
        iring1.s.isize = CVMX_MGMT_PORT_NUM_RX_BUFFERS;
        CVMX_SYNCWS;
        cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);
        cvmx_write_csr(CVMX_MIXX_IRING2(port), CVMX_MGMT_PORT_NUM_RX_BUFFERS);

        /* Disable the external input/output */
        cvmx_mgmt_port_disable(port);

        /* Set up the MAC address filtering */
        cvmx_mgmt_port_set_mac(port, state->mac);

        /* Set the default max size to an MTU of 1500 with L2 and VLAN */
        cvmx_mgmt_port_set_max_packet_size(port, 1518);

        /* Enable the port HW. Packets are not allowed until cvmx_mgmt_port_enable() is called */
        mix_ctl.u64 = 0;
        mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
        mix_ctl.s.en = 1;           /* Enable the port */
        mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
        mix_ctl.s.mrq_hwm = 1;      /* MII CB-request FIFO programmable high watermark */
        cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);

        /* Select the mode of operation for the interface. */
        if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
        {
            agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));

            if (state->mode == CVMX_MGMT_PORT_RGMII_MODE)
                agl_prtx_ctl.s.mode = 0;
            else if (state->mode == CVMX_MGMT_PORT_MII_MODE)
                agl_prtx_ctl.s.mode = 1;
            else
            {
                cvmx_dprintf("ERROR: cvmx_mgmt_port_initialize: Invalid mode for MIX(%d)\n", port);
                return CVMX_MGMT_PORT_INVALID_PARAM;
            }

            cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
        }

        /* Initialize the physical layer. */
        if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
        {
            /* MII clock counts are based on the 125 MHz reference, so our
                delays need to be scaled to match the core clock rate. The
                "+1" is to make sure rounding always waits a little too
                long. */
            uint64_t clock_scale = cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 125000000 + 1;

            /* Take the DLL and clock tree out of reset */
            agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
            agl_prtx_ctl.s.clkrst = 0;
            if (state->mode == CVMX_MGMT_PORT_RGMII_MODE) // RGMII Initialization
            {
                agl_prtx_ctl.s.dllrst = 0;
                agl_prtx_ctl.s.clktx_byp = 0;
            }
            cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
            cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));  /* Force write out before wait */

            /* Wait for the DLL to lock.  External 125 MHz reference clock must be stable at this point. */
            cvmx_wait(256 * clock_scale);

            /* The rest of the config is common between RGMII/MII */

            /* Enable the interface */
            agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
            agl_prtx_ctl.s.enable = 1;
            cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);

            /* Read the value back to force the previous write */
            agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));

            /* Enable the compensation controller */
            agl_prtx_ctl.s.comp = 1;
            cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
            cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));  /* Force write out before wait */
            cvmx_wait(1024 * clock_scale); // Wait for the compensation state machine to lock
        }
        else if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
        {
            /* Force compensation values, as they are not determined properly by HW */
            cvmx_agl_gmx_drv_ctl_t drv_ctl;

            drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
            if (port)
            {
                drv_ctl.s.byp_en1 = 1;
                drv_ctl.s.nctl1 = 6;
                drv_ctl.s.pctl1 = 6;
            }
            else
            {
                drv_ctl.s.byp_en = 1;
                drv_ctl.s.nctl = 6;
                drv_ctl.s.pctl = 6;
            }
            cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
        }
    }
    cvmx_error_enable_group(CVMX_ERROR_GROUP_MGMT_PORT, port);
    return CVMX_MGMT_PORT_SUCCESS;
}
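
/* Illustrative bring-up sketch (not part of the original SDK source): a typical
   call order for getting a management port passing traffic. The MAC address
   value is an arbitrary example.

       static int example_mgmt_port_bringup(int port)
       {
           if (cvmx_mgmt_port_initialize(port) != CVMX_MGMT_PORT_SUCCESS)
               return -1;
           cvmx_mgmt_port_set_mac(port, 0x001122334455ull);  // example address
           cvmx_mgmt_port_set_max_packet_size(port, 1518);   // MTU 1500 + L2 + VLAN
           if (cvmx_mgmt_port_enable(port) != CVMX_MGMT_PORT_SUCCESS)
               return -1;
           return 0;
       }
*/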


/**
 * Shut down a management port. This currently disables packet IO
 * but leaves all hardware and buffers. Another application can then
 * call initialize() without redoing the hardware setup.
 *
 * @param port   Management port
 *
 * @return CVMX_MGMT_PORT_SUCCESS or an error code
 */
cvmx_mgmt_port_result_t cvmx_mgmt_port_shutdown(int port)
{
    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    cvmx_error_disable_group(CVMX_ERROR_GROUP_MGMT_PORT, port);

    /* Stop packets from coming in */
    cvmx_mgmt_port_disable(port);

    /* We don't free any memory so the next initialize can reuse the HW setup */
    return CVMX_MGMT_PORT_SUCCESS;
}


/**
 * Enable packet IO on a management port
 *
 * @param port   Management port
 *
 * @return CVMX_MGMT_PORT_SUCCESS or an error code
 */
cvmx_mgmt_port_result_t cvmx_mgmt_port_enable(int port)
{
    cvmx_mgmt_port_state_t *state;
    cvmx_agl_gmx_inf_mode_t agl_gmx_inf_mode;
    cvmx_agl_gmx_rxx_frm_ctl_t rxx_frm_ctl;

    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    state = cvmx_mgmt_port_state_ptr + port;

    cvmx_spinlock_lock(&state->lock);

    rxx_frm_ctl.u64 = 0;
    rxx_frm_ctl.s.pre_align = 1;
    rxx_frm_ctl.s.pad_len = 1;  /* When set, disables the length check for non-min sized pkts with padding in the client data */
    rxx_frm_ctl.s.vlan_len = 1; /* When set, disables the length check for VLAN pkts */
    rxx_frm_ctl.s.pre_free = 1; /* When set, PREAMBLE checking is less strict */
    rxx_frm_ctl.s.ctl_smac = 0; /* Control Pause Frames can match station SMAC */
    rxx_frm_ctl.s.ctl_mcst = 1; /* Control Pause Frames can match globally assigned Multicast address */
    rxx_frm_ctl.s.ctl_bck = 1;  /* Forward pause information to TX block */
    rxx_frm_ctl.s.ctl_drp = 1;  /* Drop Control Pause Frames */
    rxx_frm_ctl.s.pre_strp = 1; /* Strip off the preamble */
    rxx_frm_ctl.s.pre_chk = 1;  /* This port is configured to send PREAMBLE+SFD to begin every frame.  GMX checks that the PREAMBLE is sent correctly */
    cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);

    /* Enable the AGL block */
    if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
    {
        agl_gmx_inf_mode.u64 = 0;
        agl_gmx_inf_mode.s.en = 1;
        cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
    }

    /* Configure the port duplex and enables */
    cvmx_mgmt_port_link_set(port, cvmx_mgmt_port_link_get(port));

    cvmx_spinlock_unlock(&state->lock);
    return CVMX_MGMT_PORT_SUCCESS;
}


/**
 * Disable packet IO on a management port
 *
 * @param port   Management port
 *
 * @return CVMX_MGMT_PORT_SUCCESS or an error code
 */
cvmx_mgmt_port_result_t cvmx_mgmt_port_disable(int port)
{
    cvmx_mgmt_port_state_t *state;
    cvmx_agl_gmx_prtx_cfg_t agl_gmx_prtx;

    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    state = cvmx_mgmt_port_state_ptr + port;

    cvmx_spinlock_lock(&state->lock);

    agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
    agl_gmx_prtx.s.en = 0;
    cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

    cvmx_spinlock_unlock(&state->lock);
    return CVMX_MGMT_PORT_SUCCESS;
}


/**
 * Send a packet out the management port. The packet is copied so
 * the input buffer isn't used after this call.
 *
 * @param port       Management port
 * @param packet_len Length of the packet to send. It does not include the final CRC
 * @param buffer     Packet data
 *
 * @return CVMX_MGMT_PORT_SUCCESS or an error code
 */
cvmx_mgmt_port_result_t cvmx_mgmt_port_send(int port, int packet_len, void *buffer)
{
    cvmx_mgmt_port_state_t *state;
    cvmx_mixx_oring2_t mix_oring2;

    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    /* Make sure the packet size is valid */
    if ((packet_len < 1) || (packet_len > CVMX_MGMT_PORT_TX_BUFFER_SIZE))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    if (buffer == NULL)
        return CVMX_MGMT_PORT_INVALID_PARAM;

    state = cvmx_mgmt_port_state_ptr + port;

    cvmx_spinlock_lock(&state->lock);

    mix_oring2.u64 = cvmx_read_csr(CVMX_MIXX_ORING2(port));
    if (mix_oring2.s.odbell >= CVMX_MGMT_PORT_NUM_TX_BUFFERS - 1)
    {
        /* No room for another packet */
        cvmx_spinlock_unlock(&state->lock);
        return CVMX_MGMT_PORT_NO_MEMORY;
    }
    else
    {
        /* Copy the packet into the output buffer */
        memcpy(state->tx_buffers[state->tx_write_index], buffer, packet_len);
        /* Insert the source MAC */
        memcpy(state->tx_buffers[state->tx_write_index] + 6, ((char*)&state->mac) + 2, 6);
        /* Update the TX ring buffer entry size */
        state->tx_ring[state->tx_write_index].s.len = packet_len;
        /* This code doesn't support TX timestamps */
        state->tx_ring[state->tx_write_index].s.tstamp = 0;
        /* Increment our TX index */
        state->tx_write_index = (state->tx_write_index + 1) % CVMX_MGMT_PORT_NUM_TX_BUFFERS;
        /* Ring the doorbell, sending the packet */
        CVMX_SYNCWS;
        cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
        if (cvmx_read_csr(CVMX_MIXX_ORCNT(port)))
            cvmx_write_csr(CVMX_MIXX_ORCNT(port), cvmx_read_csr(CVMX_MIXX_ORCNT(port)));

        cvmx_spinlock_unlock(&state->lock);
        return CVMX_MGMT_PORT_SUCCESS;
    }
}
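
/* Illustrative usage sketch (not part of the original SDK source): transmitting
   a minimal broadcast frame. The frame layout and the helper name
   example_send_test_frame() are assumptions for illustration only.

       static void example_send_test_frame(int port)
       {
           uint8_t frame[64] = {0};            // padded to the Ethernet minimum
           memset(frame, 0xff, 6);             // destination: broadcast
           // Bytes 6-11 (source MAC) are overwritten by cvmx_mgmt_port_send()
           frame[12] = 0x08;                   // EtherType 0x0800 (IPv4)
           frame[13] = 0x00;
           if (cvmx_mgmt_port_send(port, sizeof(frame), frame) != CVMX_MGMT_PORT_SUCCESS)
               cvmx_dprintf("MIX%d: TX ring full\n", port);
       }
*/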


#if defined(__FreeBSD__)
/**
 * Send a packet out the management port. The packet is copied so
 * the input mbuf isn't used after this call.
 *
 * @param port       Management port
 * @param m          Packet mbuf (with pkthdr)
 *
 * @return CVMX_MGMT_PORT_SUCCESS or an error code
 */
cvmx_mgmt_port_result_t cvmx_mgmt_port_sendm(int port, const struct mbuf *m)
{
    cvmx_mgmt_port_state_t *state;
    cvmx_mixx_oring2_t mix_oring2;

    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    /* Make sure the packet size is valid */
    if ((m->m_pkthdr.len < 1) || (m->m_pkthdr.len > CVMX_MGMT_PORT_TX_BUFFER_SIZE))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    state = cvmx_mgmt_port_state_ptr + port;

    cvmx_spinlock_lock(&state->lock);

    mix_oring2.u64 = cvmx_read_csr(CVMX_MIXX_ORING2(port));
    if (mix_oring2.s.odbell >= CVMX_MGMT_PORT_NUM_TX_BUFFERS - 1)
    {
        /* No room for another packet */
        cvmx_spinlock_unlock(&state->lock);
        return CVMX_MGMT_PORT_NO_MEMORY;
    }
    else
    {
        /* Copy the packet into the output buffer */
        m_copydata(m, 0, m->m_pkthdr.len, state->tx_buffers[state->tx_write_index]);
        /* Update the TX ring buffer entry size */
        state->tx_ring[state->tx_write_index].s.len = m->m_pkthdr.len;
        /* This code doesn't support TX timestamps */
        state->tx_ring[state->tx_write_index].s.tstamp = 0;
        /* Increment our TX index */
        state->tx_write_index = (state->tx_write_index + 1) % CVMX_MGMT_PORT_NUM_TX_BUFFERS;
        /* Ring the doorbell, sending the packet */
        CVMX_SYNCWS;
        cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
        if (cvmx_read_csr(CVMX_MIXX_ORCNT(port)))
            cvmx_write_csr(CVMX_MIXX_ORCNT(port), cvmx_read_csr(CVMX_MIXX_ORCNT(port)));

        cvmx_spinlock_unlock(&state->lock);
        return CVMX_MGMT_PORT_SUCCESS;
    }
}
#endif


/**
 * Receive a packet from the management port.
 *
 * @param port       Management port
 * @param buffer_len Size of the buffer to receive the packet into
 * @param buffer     Buffer to receive the packet into
 *
 * @return The size of the packet, or a negative error code on failure. Zero
 *         means that no packets were available.
 */
int cvmx_mgmt_port_receive(int port, int buffer_len, uint8_t *buffer)
{
    cvmx_mixx_ircnt_t mix_ircnt;
    cvmx_mgmt_port_state_t *state;
    int result;

    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    /* Make sure the buffer size is valid */
    if (buffer_len < 1)
        return CVMX_MGMT_PORT_INVALID_PARAM;

    if (buffer == NULL)
        return CVMX_MGMT_PORT_INVALID_PARAM;

    state = cvmx_mgmt_port_state_ptr + port;

    cvmx_spinlock_lock(&state->lock);

    /* Find out how many RX packets are pending */
    mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
    if (mix_ircnt.s.ircnt)
    {
        uint64_t *source = (void *)state->rx_buffers[state->rx_read_index];
        uint64_t *zero_check = source;
        /* CN56XX pass 1 has an erratum where packets might start 8 bytes
            into the buffer instead of at their correct location. If the
            first 8 bytes are zero we assume this has happened */
        if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) && (*zero_check == 0))
            source++;
        /* Start off with zero bytes received */
        result = 0;
        /* While the completion code signals more data, copy the buffers
            into the user's data */
        while (state->rx_ring[state->rx_read_index].s.code == 16)
        {
            /* Only copy what will fit in the user's buffer */
            int length = state->rx_ring[state->rx_read_index].s.len;
            if (length > buffer_len)
                length = buffer_len;
            memcpy(buffer, source, length);
            /* Reduce the size of the buffer to the remaining space. If we run
                out we will signal an error when the code 15 buffer doesn't fit */
            buffer += length;
            buffer_len -= length;
            result += length;
            /* Update this buffer for reuse in future receives. This size is
                -8 due to an erratum for CN56XX pass 1 */
            state->rx_ring[state->rx_read_index].s.code = 0;
            state->rx_ring[state->rx_read_index].s.len = CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
            state->rx_read_index = (state->rx_read_index + 1) % CVMX_MGMT_PORT_NUM_RX_BUFFERS;
            /* Zero the beginning of the buffer for use by the erratum check */
            *zero_check = 0;
            CVMX_SYNCWS;
            /* Increment the number of RX buffers */
            cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
            source = (void *)state->rx_buffers[state->rx_read_index];
            zero_check = source;
        }

        /* Check for the final good completion code */
        if (state->rx_ring[state->rx_read_index].s.code == 15)
        {
            if (buffer_len >= state->rx_ring[state->rx_read_index].s.len)
            {
                int length = state->rx_ring[state->rx_read_index].s.len;
                memcpy(buffer, source, length);
                result += length;
            }
            else
            {
                /* Not enough room for the packet */
                cvmx_dprintf("ERROR: cvmx_mgmt_port_receive: Packet (%d) larger than supplied buffer (%d)\n", state->rx_ring[state->rx_read_index].s.len, buffer_len);
                result = CVMX_MGMT_PORT_NO_MEMORY;
            }
        }
        else
        {
            cvmx_dprintf("ERROR: cvmx_mgmt_port_receive: Receive error code %d. Packet dropped (len %d)\n",
                         state->rx_ring[state->rx_read_index].s.code, state->rx_ring[state->rx_read_index].s.len + result);
            result = -state->rx_ring[state->rx_read_index].s.code;

            /* Check to see if we need to change the duplex. */
            cvmx_mgmt_port_link_set(port, cvmx_mgmt_port_link_get(port));
        }

        /* Clean out the ring buffer entry. This size is -8 due to an erratum
            for CN56XX pass 1 */
        state->rx_ring[state->rx_read_index].s.code = 0;
        state->rx_ring[state->rx_read_index].s.len = CVMX_MGMT_PORT_RX_BUFFER_SIZE - 8;
        state->rx_read_index = (state->rx_read_index + 1) % CVMX_MGMT_PORT_NUM_RX_BUFFERS;
        /* Zero the beginning of the buffer for use by the erratum check */
        *zero_check = 0;
        CVMX_SYNCWS;
        /* Increment the number of RX buffers */
        cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
        /* Decrement the pending RX count */
        cvmx_write_csr(CVMX_MIXX_IRCNT(port), 1);
    }
    else
    {
        /* No packets available */
        result = 0;
    }
    cvmx_spinlock_unlock(&state->lock);
    return result;
}
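
/* Illustrative usage sketch (not part of the original SDK source): a simple RX
   polling loop. The buffer size choice and the hypothetical consumer
   example_process_packet() are assumptions for illustration only.

       static void example_rx_poll(int port)
       {
           uint8_t buffer[CVMX_MGMT_PORT_RX_BUFFER_SIZE];
           int len = cvmx_mgmt_port_receive(port, sizeof(buffer), buffer);
           if (len > 0)
               example_process_packet(buffer, len);  // hypothetical consumer
           else if (len < 0)
               cvmx_dprintf("MIX%d: RX error %d\n", port, len);
           // len == 0 means no packet was pending
       }
*/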

/**
 * Set the MAC address for a management port
 *
 * @param port   Management port
 * @param mac    New MAC address. The lower 6 bytes are used.
 *
 * @return CVMX_MGMT_PORT_SUCCESS or an error code
 */
cvmx_mgmt_port_result_t cvmx_mgmt_port_set_mac(int port, uint64_t mac)
{
    cvmx_mgmt_port_state_t *state;
    cvmx_agl_gmx_rxx_adr_ctl_t agl_gmx_rxx_adr_ctl;

    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    state = cvmx_mgmt_port_state_ptr + port;

    cvmx_spinlock_lock(&state->lock);

    agl_gmx_rxx_adr_ctl.u64 = 0;
    agl_gmx_rxx_adr_ctl.s.cam_mode = 1; /* Only accept matching MAC addresses */
    agl_gmx_rxx_adr_ctl.s.mcst = 0;     /* Drop multicast */
    agl_gmx_rxx_adr_ctl.s.bcst = 1;     /* Allow broadcast */
    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), agl_gmx_rxx_adr_ctl.u64);

    /* Only using one of the CAMs */
    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), (mac >> 40) & 0xff);
    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), (mac >> 32) & 0xff);
    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), (mac >> 24) & 0xff);
    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), (mac >> 16) & 0xff);
    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), (mac >> 8) & 0xff);
    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), (mac >> 0) & 0xff);
    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 1);
    state->mac = mac;

    cvmx_spinlock_unlock(&state->lock);
    return CVMX_MGMT_PORT_SUCCESS;
}
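
/* Illustrative sketch (not part of the original SDK source): packing a six byte
   MAC address into the uint64_t layout expected above, with the first byte of
   the address landing in bits 47:40.

       static uint64_t example_pack_mac(const uint8_t bytes[6])
       {
           uint64_t mac = 0;
           int i;
           for (i = 0; i < 6; i++)
               mac = (mac << 8) | bytes[i];   // 00:11:22:33:44:55 -> 0x001122334455ull
           return mac;
       }

   Usage: cvmx_mgmt_port_set_mac(port, example_pack_mac(my_mac));
*/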


/**
 * Get the MAC address for a management port
 *
 * @param port   Management port
 *
 * @return MAC address
 */
uint64_t cvmx_mgmt_port_get_mac(int port)
{
    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return CVMX_MGMT_PORT_INVALID_PARAM;

    return cvmx_mgmt_port_state_ptr[port].mac;
}

/**
 * Set the multicast list.
 *
 * @param port   Management port
 * @param flags  Interface flags
 *
 * @return
 */
void cvmx_mgmt_port_set_multicast_list(int port, int flags)
{
    cvmx_mgmt_port_state_t *state;
    cvmx_agl_gmx_rxx_adr_ctl_t agl_gmx_rxx_adr_ctl;

    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return;

    state = cvmx_mgmt_port_state_ptr + port;

    cvmx_spinlock_lock(&state->lock);

    agl_gmx_rxx_adr_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port));

    /* Allow broadcast MAC addresses */
    if (!agl_gmx_rxx_adr_ctl.s.bcst)
        agl_gmx_rxx_adr_ctl.s.bcst = 1;

    if ((flags & CVMX_IFF_ALLMULTI) || (flags & CVMX_IFF_PROMISC))
        agl_gmx_rxx_adr_ctl.s.mcst = 2; /* Force accept multicast packets */
    else
        agl_gmx_rxx_adr_ctl.s.mcst = 1; /* Force reject multicast packets */

    if (flags & CVMX_IFF_PROMISC)
        agl_gmx_rxx_adr_ctl.s.cam_mode = 0; /* Reject matches if promisc. Since CAM is shut off, should accept everything */
    else
        agl_gmx_rxx_adr_ctl.s.cam_mode = 1; /* Filter packets based on the CAM */

    cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), agl_gmx_rxx_adr_ctl.u64);

    if (flags & CVMX_IFF_PROMISC)
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 0);
    else
        cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), 1);

    cvmx_spinlock_unlock(&state->lock);
}
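
/* Illustrative sketch (not part of the original SDK source): the flag
   combinations the function above distinguishes.

       cvmx_mgmt_port_set_multicast_list(port, 0);                  // unicast + broadcast only
       cvmx_mgmt_port_set_multicast_list(port, CVMX_IFF_ALLMULTI);  // also accept all multicast
       cvmx_mgmt_port_set_multicast_list(port, CVMX_IFF_PROMISC);   // accept everything (CAM disabled)
*/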


/**
 * Set the maximum packet size allowed in. Size is specified
 * including L2 but without FCS. A normal MTU would correspond
 * to 1514 assuming the standard 14 byte L2 header.
 *
 * @param port   Management port
 * @param size_without_fcs
 *               Size in bytes without FCS
 */
void cvmx_mgmt_port_set_max_packet_size(int port, int size_without_fcs)
{
    cvmx_mgmt_port_state_t *state;

    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
        return;

    state = cvmx_mgmt_port_state_ptr + port;

    cvmx_spinlock_lock(&state->lock);
    cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
    cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port), (size_without_fcs+7) & 0xfff8);
    cvmx_spinlock_unlock(&state->lock);
}
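
/* Illustrative sketch (not part of the original SDK source): deriving the value
   passed above from an IP MTU, matching the 1518 default programmed by
   cvmx_mgmt_port_initialize().

       int mtu = 1500;
       cvmx_mgmt_port_set_max_packet_size(port, mtu + 14 + 4);  // L2 header + VLAN tag, FCS excluded
*/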

/**
 * Return the link state of an RGMII/MII port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_mgmt_port_link_set().
 *
 * @param port     The RGMII/MII interface port to query
 *
 * @return Link state
 */
cvmx_helper_link_info_t cvmx_mgmt_port_link_get(int port)
{
    cvmx_mgmt_port_state_t *state;
    cvmx_helper_link_info_t result;

    state = cvmx_mgmt_port_state_ptr + port;
    result.u64 = 0;

    if ((port < 0) || (port >= __cvmx_mgmt_port_num_ports()))
    {
        cvmx_dprintf("WARNING: Invalid port %d\n", port);
        return result;
    }

    if (state->port != -1)
        return __cvmx_helper_board_link_get(state->port);
    else // Simulator does not have a PHY, use some defaults.
    {
        result.s.full_duplex = 1;
        result.s.link_up = 1;
        result.s.speed = 100;
        return result;
    }
    return result;
}

/**
 * Configure RGMII/MII port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 *
 * @param port      RGMII/MII interface port
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
 */
int cvmx_mgmt_port_link_set(int port, cvmx_helper_link_info_t link_info)
{
    cvmx_agl_gmx_prtx_cfg_t agl_gmx_prtx;

    /* Disable GMX before we make any changes. */
    agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
    agl_gmx_prtx.s.en = 0;
    agl_gmx_prtx.s.tx_en = 0;
    agl_gmx_prtx.s.rx_en = 0;
    cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
    {
        uint64_t one_second = cvmx_clock_get_rate(CVMX_CLOCK_CORE);
        /* Wait for GMX to be idle */
        if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port), cvmx_agl_gmx_prtx_cfg_t, rx_idle, ==, 1, one_second)
            || CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port), cvmx_agl_gmx_prtx_cfg_t, tx_idle, ==, 1, one_second))
        {
            cvmx_dprintf("MIX%d: Timeout waiting for GMX to be idle\n", port);
            return -1;
        }
    }

    agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));

    /* Set duplex mode */
    if (!link_info.s.link_up)
        agl_gmx_prtx.s.duplex = 1;   /* Force full duplex on down links */
    else
        agl_gmx_prtx.s.duplex = link_info.s.full_duplex;

    switch (link_info.s.speed)
    {
        case 10:
            agl_gmx_prtx.s.speed = 0;
            agl_gmx_prtx.s.slottime = 0;
            if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
            {
                agl_gmx_prtx.s.speed_msb = 1;
                agl_gmx_prtx.s.burst = 1;
            }
            break;

        case 100:
            agl_gmx_prtx.s.speed = 0;
            agl_gmx_prtx.s.slottime = 0;
            if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
            {
                agl_gmx_prtx.s.speed_msb = 0;
                agl_gmx_prtx.s.burst = 1;
            }
            break;

        case 1000:
            /* 1000 MBits is only supported on 6XXX chips */
            if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
            {
                agl_gmx_prtx.s.speed_msb = 0;
                agl_gmx_prtx.s.speed = 1;
                agl_gmx_prtx.s.slottime = 1;  /* Only matters for half-duplex */
                agl_gmx_prtx.s.burst = agl_gmx_prtx.s.duplex;
            }
            break;

        /* No link */
        case 0:
        default:
            break;
    }

    /* Write the new GMX setting with the port still disabled. */
    cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

    /* Read GMX CFG again to make sure the config is completed. */
    agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));

    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
    {
        cvmx_mgmt_port_state_t *state = cvmx_mgmt_port_state_ptr + port;
        cvmx_agl_gmx_txx_clk_t agl_clk;
        agl_clk.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_CLK(port));
        agl_clk.s.clk_cnt = 1;    /* MII (both speeds) and RGMII 1000 setting */
        if (state->mode == CVMX_MGMT_PORT_RGMII_MODE)
        {
            if (link_info.s.speed == 10)
                agl_clk.s.clk_cnt = 50;
            else if (link_info.s.speed == 100)
                agl_clk.s.clk_cnt = 5;
        }
        cvmx_write_csr(CVMX_AGL_GMX_TXX_CLK(port), agl_clk.u64);
    }

    /* Enable transmit and receive ports */
    agl_gmx_prtx.s.tx_en = 1;
    agl_gmx_prtx.s.rx_en = 1;
    cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

    /* Enable the link. */
    agl_gmx_prtx.s.en = 1;
    cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
    return 0;
}
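
/* Illustrative sketch (not part of the original SDK source): periodically
   resynchronizing the MAC with autonegotiation results, e.g. from a slow
   timer. A single port is assumed so the cached state can be static.

       static void example_mgmt_port_link_poll(int port)
       {
           static cvmx_helper_link_info_t last_link;
           cvmx_helper_link_info_t link = cvmx_mgmt_port_link_get(port);
           if (link.u64 != last_link.u64)     // only reprogram GMX on a change
           {
               cvmx_mgmt_port_link_set(port, link);
               last_link = link;
           }
       }
*/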