Lines matching refs: dp (the struct dsa_port pointer used throughout these DSA switch-tree helpers)

107 struct dsa_port *dp;
109 list_for_each_entry(dp, &dst->ports, list)
110 if (dsa_port_lag_dev_get(dp) == lag_dev)
111 return dp->lag;
119 struct dsa_port *dp;
121 list_for_each_entry(dp, &dst->ports, list)
122 if (dsa_port_bridge_dev_get(dp) == br)
123 return dp->bridge;
182 struct dsa_port *dp;
188 list_for_each_entry(dp, &dst->ports, list) {
189 if (dp->ds->index != sw_index)
192 return dp->ds;
278 struct dsa_port *dp;
280 list_for_each_entry(dp, &dst->ports, list)
281 if (dp->dn == dn)
282 return dp;
287 static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
290 struct dsa_switch *ds = dp->ds;
297 if (dl->dp == dp && dl->link_dp == link_dp)
304 dl->dp = dp;
313 static bool dsa_port_setup_routing_table(struct dsa_port *dp)
315 struct dsa_switch *ds = dp->ds;
317 struct device_node *dn = dp->dn;
330 dl = dsa_link_touch(dp, link_dp);
343 struct dsa_port *dp;
345 list_for_each_entry(dp, &dst->ports, list) {
346 if (dsa_port_is_dsa(dp)) {
347 complete = dsa_port_setup_routing_table(dp);
358 struct dsa_port *dp;
360 list_for_each_entry(dp, &dst->ports, list)
361 if (dsa_port_is_cpu(dp))
362 return dp;
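
The matches at file lines 107-123, 278-282, and 358-362 above all share one shape: walk the switch tree's port list (dst->ports) and return the first dp whose field matches the lookup key (LAG device, bridge, OF node, or port type). Below is a minimal self-contained sketch of that lookup shape, using hypothetical stand-in types rather than the kernel's struct dsa_port and list_for_each_entry():

#include <stddef.h>

/* Hypothetical stand-ins for the structures referenced above. */
struct port {
	const void *bridge_dev;		/* key compared by the lookup */
	struct port *next;		/* simplified stand-in for the list_head */
};

struct tree {
	struct port *ports;		/* head of the tree-wide port list */
};

/* Same shape as the refs above: scan dst->ports, return the first match. */
static struct port *tree_find_port_by_bridge(struct tree *dst,
					      const void *bridge_dev)
{
	struct port *dp;

	for (dp = dst->ports; dp; dp = dp->next)
		if (dp->bridge_dev == bridge_dev)
			return dp;

	return NULL;			/* no port in this tree matches */
}
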
386 struct dsa_port *cpu_dp, *dp;
394 list_for_each_entry(dp, &dst->ports, list) {
395 if (dp->cpu_dp)
398 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
399 dp->cpu_dp = cpu_dp;
430 struct dsa_port *preferred_cpu_dp, *cpu_dp, *dp;
441 dsa_switch_for_each_port(dp, cpu_dp->ds) {
443 if (dp->cpu_dp)
446 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
447 dp->cpu_dp = cpu_dp;
456 struct dsa_port *dp;
458 list_for_each_entry(dp, &dst->ports, list)
459 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
460 dp->cpu_dp = NULL;
463 static int dsa_port_setup(struct dsa_port *dp)
466 struct dsa_switch *ds = dp->ds;
470 if (dp->setup)
473 err = dsa_port_devlink_setup(dp);
477 switch (dp->type) {
479 dsa_port_disable(dp);
482 if (dp->dn) {
483 err = dsa_shared_port_link_register_of(dp);
490 dp->index);
493 err = dsa_port_enable(dp, NULL);
500 if (dp->dn) {
501 err = dsa_shared_port_link_register_of(dp);
508 dp->index);
511 err = dsa_port_enable(dp, NULL);
518 of_get_mac_address(dp->dn, dp->mac);
519 err = dsa_user_create(dp);
524 dsa_port_disable(dp);
526 dsa_shared_port_link_unregister_of(dp);
528 dsa_port_devlink_teardown(dp);
532 dp->setup = true;
537 static void dsa_port_teardown(struct dsa_port *dp)
539 if (!dp->setup)
542 switch (dp->type) {
546 dsa_port_disable(dp);
547 if (dp->dn)
548 dsa_shared_port_link_unregister_of(dp);
551 dsa_port_disable(dp);
552 if (dp->dn)
553 dsa_shared_port_link_unregister_of(dp);
556 if (dp->user) {
557 dsa_user_destroy(dp->user);
558 dp->user = NULL;
563 dsa_port_devlink_teardown(dp);
565 dp->setup = false;
568 static int dsa_port_setup_as_unused(struct dsa_port *dp)
570 dp->type = DSA_PORT_TYPE_UNUSED;
571 return dsa_port_setup(dp);
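
dsa_port_setup() and dsa_port_teardown() above (file lines 463-571) dispatch on dp->type: devlink registration first, then an optional link registration when an OF node is present, then port enable (or user netdev creation), with the error paths undoing those steps in reverse before devlink teardown. The following is a compressed, self-contained sketch of that dispatch-and-unwind flow; every name in it is a hypothetical stand-in, not the kernel API:

enum ptype { P_UNUSED, P_CPU, P_DSA, P_USER };

struct sport {
	enum ptype type;
	int has_link_node;	/* stand-in for dp->dn being present */
	int setup;
};

/* Stub steps standing in for the real devlink/link/enable helpers. */
static int devlink_setup(struct sport *p) { return 0; }
static void devlink_teardown(struct sport *p) { }
static int link_register(struct sport *p) { return 0; }
static void link_unregister(struct sport *p) { }
static int port_enable(struct sport *p) { return 0; }
static void port_disable(struct sport *p) { }

static int sport_setup(struct sport *p)
{
	int err;

	if (p->setup)
		return 0;

	err = devlink_setup(p);
	if (err)
		return err;

	switch (p->type) {
	case P_UNUSED:
		port_disable(p);
		break;
	case P_CPU:
	case P_DSA:
		if (p->has_link_node) {
			err = link_register(p);
			if (err)
				break;
		}
		err = port_enable(p);
		if (err && p->has_link_node)
			link_unregister(p);	/* unwind the earlier step */
		break;
	case P_USER:
		err = 0;	/* user netdev creation elided in this sketch */
		break;
	}

	if (err) {
		devlink_teardown(p);
		return err;
	}

	p->setup = 1;
	return 0;
}
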
721 struct dsa_port *dp;
723 list_for_each_entry(dp, &dst->ports, list)
724 if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
725 dsa_port_teardown(dp);
729 list_for_each_entry(dp, &dst->ports, list)
730 if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
731 dsa_port_teardown(dp);
736 struct dsa_port *dp;
738 list_for_each_entry(dp, &dst->ports, list)
739 dsa_switch_teardown(dp->ds);
745 struct dsa_port *dp;
748 list_for_each_entry(dp, &dst->ports, list) {
749 if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
750 err = dsa_port_setup(dp);
756 list_for_each_entry(dp, &dst->ports, list) {
757 if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
758 err = dsa_port_setup(dp);
760 err = dsa_port_setup_as_unused(dp);
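
dsa_tree_setup_ports() above (file lines 745-760) brings ports up in two passes: shared (CPU and DSA) ports first, then user and unused ports, and a user port whose setup fails is retried as unused rather than failing the whole tree. A rough sketch of that two-pass-with-fallback ordering over a plain array; the sim_* names are hypothetical:

enum port_type { PORT_CPU, PORT_DSA, PORT_USER, PORT_UNUSED };

struct sim_port {
	enum port_type type;
	int ready;
};

/* Hypothetical stand-in for dsa_port_setup(); returns 0 on success. */
static int sim_port_setup(struct sim_port *p)
{
	p->ready = 1;
	return 0;
}

static int sim_tree_setup_ports(struct sim_port *ports, int n)
{
	int i, err;

	/* Pass 1: shared (CPU and DSA) ports come up first. */
	for (i = 0; i < n; i++) {
		if (ports[i].type != PORT_CPU && ports[i].type != PORT_DSA)
			continue;
		err = sim_port_setup(&ports[i]);
		if (err)
			return err;
	}

	/* Pass 2: user ports; a failed user port is demoted to unused. */
	for (i = 0; i < n; i++) {
		if (ports[i].type != PORT_USER && ports[i].type != PORT_UNUSED)
			continue;
		err = sim_port_setup(&ports[i]);
		if (err && ports[i].type == PORT_USER) {
			ports[i].type = PORT_UNUSED;
			err = sim_port_setup(&ports[i]);
		}
		if (err)
			return err;
	}

	return 0;
}
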
777 struct dsa_port *dp;
780 list_for_each_entry(dp, &dst->ports, list) {
781 err = dsa_switch_setup(dp->ds);
842 struct dsa_port *dp;
844 list_for_each_entry(dp, &dst->ports, list) {
845 if (dp->ds->num_lag_ids > len)
846 len = dp->ds->num_lag_ids;
985 struct dsa_port *dp;
996 dsa_tree_for_each_user_port(dp, dst) {
997 if (dsa_port_to_conduit(dp)->flags & IFF_UP)
1000 if (dp->user->flags & IFF_UP)
1087 struct dsa_port *dp;
1089 dsa_switch_for_each_port(dp, ds)
1090 if (dp->index == index)
1091 return dp;
1093 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1094 if (!dp)
1097 dp->ds = ds;
1098 dp->index = index;
1100 mutex_init(&dp->addr_lists_lock);
1101 mutex_init(&dp->vlans_lock);
1102 INIT_LIST_HEAD(&dp->fdbs);
1103 INIT_LIST_HEAD(&dp->mdbs);
1104 INIT_LIST_HEAD(&dp->vlans); /* also initializes &dp->user_vlans */
1105 INIT_LIST_HEAD(&dp->list);
1106 list_add_tail(&dp->list, &dst->ports);
1108 return dp;
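
dsa_port_touch() above (file lines 1087-1108) is a find-or-create helper: it returns the port already tracked for that index, otherwise it zero-allocates one, initializes its locks and the fdbs/mdbs/vlans lists, and links it into dst->ports. A minimal userspace sketch of the same find-or-create idea; xport/xtree and the singly linked list are illustrative simplifications, not the kernel types:

#include <stdlib.h>

struct xport {
	int index;
	struct xport *next;
};

struct xtree {
	struct xport *ports;		/* stand-in for dst->ports */
};

/* Return the existing port for @index, or allocate and enlist a new one. */
static struct xport *xport_touch(struct xtree *dst, int index)
{
	struct xport *dp;

	for (dp = dst->ports; dp; dp = dp->next)
		if (dp->index == index)
			return dp;

	dp = calloc(1, sizeof(*dp));	/* zeroed, like kzalloc() */
	if (!dp)
		return NULL;

	dp->index = index;
	dp->next = dst->ports;		/* prepended for brevity; the helper above appends to the tail */
	dst->ports = dp;

	return dp;
}
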
1111 static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
1113 dp->type = DSA_PORT_TYPE_USER;
1114 dp->name = name;
1119 static int dsa_port_parse_dsa(struct dsa_port *dp)
1121 dp->type = DSA_PORT_TYPE_DSA;
1126 static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
1130 struct dsa_switch *mds, *ds = dp->ds;
1149 return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
1152 static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *conduit,
1156 struct dsa_switch *ds = dp->ds;
1161 default_proto = dsa_get_tag_protocol(dp, conduit);
1216 dp->conduit = conduit;
1217 dp->type = DSA_PORT_TYPE_CPU;
1218 dsa_port_set_tag_protocol(dp, dst->tag_ops);
1219 dp->dst = dst;
1237 static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
1243 dp->dn = dn;
1255 return dsa_port_parse_cpu(dp, conduit, user_protocol);
1259 return dsa_port_parse_dsa(dp);
1261 return dsa_port_parse_user(dp, name);
1268 struct dsa_port *dp;
1297 dp = dsa_to_port(ds, reg);
1299 err = dsa_port_parse_of(dp, port);
1343 struct dsa_port *dp;
1347 dp = dsa_port_touch(ds, port);
1348 if (!dp)
1406 static int dsa_port_parse(struct dsa_port *dp, const char *name,
1418 return dsa_port_parse_cpu(dp, conduit, NULL);
1422 return dsa_port_parse_dsa(dp);
1424 return dsa_port_parse_user(dp, name);
1431 struct dsa_port *dp;
1440 dp = dsa_to_port(ds, i);
1445 err = dsa_port_parse(dp, name, dev);
1481 struct dsa_port *dp, *next;
1483 dsa_switch_for_each_port_safe(dp, next, ds) {
1484 WARN_ON(!list_empty(&dp->fdbs));
1485 WARN_ON(!list_empty(&dp->mdbs));
1486 WARN_ON(!list_empty(&dp->vlans));
1487 list_del(&dp->list);
1488 kfree(dp);
1582 struct dsa_port *dp;
1591 dsa_switch_for_each_user_port(dp, ds) {
1592 conduit = dsa_port_to_conduit(dp);
1593 user_dev = dp->user;
1601 dsa_switch_for_each_cpu_port(dp, ds)
1602 dp->conduit->dsa_ptr = NULL;
1611 static bool dsa_port_is_initialized(const struct dsa_port *dp)
1613 return dp->type == DSA_PORT_TYPE_USER && dp->user;
1618 struct dsa_port *dp;
1622 dsa_switch_for_each_port(dp, ds) {
1623 if (!dsa_port_is_initialized(dp))
1626 ret = dsa_user_suspend(dp->user);
1640 struct dsa_port *dp;
1650 dsa_switch_for_each_port(dp, ds) {
1651 if (!dsa_port_is_initialized(dp))
1654 ret = dsa_user_resume(dp->user);
1680 return a->dp == b->dp;
1695 struct dsa_port *dp = dsa_to_port(ds, port);
1698 lockdep_assert_held(&dp->addr_lists_lock);
1700 list_for_each_entry(a, &dp->fdbs, list) {
1716 struct dsa_port *dp = dsa_to_port(ds, port);
1719 lockdep_assert_held(&dp->addr_lists_lock);
1721 list_for_each_entry(a, &dp->mdbs, list) {
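
The last two groups (file lines 1680-1721) are the per-port address-database lookups: both walk dp->fdbs or dp->mdbs, and both assert via lockdep_assert_held() that the caller already holds dp->addr_lists_lock. A small sketch of such a lookup with the same locking precondition spelled out; the aport/addr_entry types and the memcmp-based match are illustrative only:

#include <string.h>
#include <pthread.h>

struct addr_entry {
	unsigned char addr[6];
	int refcount;
	struct addr_entry *next;
};

struct aport {
	pthread_mutex_t addr_lists_lock;
	struct addr_entry *fdbs;	/* stand-in for the &dp->fdbs list */
};

/*
 * Caller must hold dp->addr_lists_lock; the kernel helpers above state the
 * same precondition with lockdep_assert_held().
 */
static struct addr_entry *aport_find_fdb(struct aport *dp,
					 const unsigned char *addr)
{
	struct addr_entry *a;

	for (a = dp->fdbs; a; a = a->next)
		if (memcmp(a->addr, addr, sizeof(a->addr)) == 0)
			return a;

	return NULL;
}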