/* SPDX-License-Identifier: GPL-2.0 */
/*
 * DPAA2 Ethernet Switch declarations
 *
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2021 NXP
 *
 */

#ifndef __ETHSW_H
#define __ETHSW_H

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <uapi/linux/if_bridge.h>
#include <net/switchdev.h>
#include <linux/if_bridge.h>
#include <linux/fsl/mc.h>
#include <net/pkt_cls.h>
#include <soc/fsl/dpaa2-io.h>

#include "dpaa2-mac.h"
#include "dpsw.h"

/* Number of IRQs supported */
#define DPSW_IRQ_NUM	2

/* Port is member of VLAN */
#define ETHSW_VLAN_MEMBER	1
/* VLAN to be treated as untagged on egress */
#define ETHSW_VLAN_UNTAGGED	2
/* Untagged frames will be assigned to this VLAN */
#define ETHSW_VLAN_PVID		4
/* VLAN configured on the switch */
#define ETHSW_VLAN_GLOBAL	8
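
/*
 * For illustration: these flags are combined per VLAN entry, so a port's
 * default VLAN would typically be recorded as
 *
 *	ETHSW_VLAN_MEMBER | ETHSW_VLAN_UNTAGGED | ETHSW_VLAN_PVID
 *
 * meaning the port is a member, sends the VLAN untagged on egress and
 * assigns untagged ingress traffic to it.
 */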

/* Maximum Frame Length supported by HW (currently 10k) */
#define DPAA2_MFL		(10 * 1024)
#define ETHSW_MAX_FRAME_LENGTH	(DPAA2_MFL - VLAN_ETH_HLEN - ETH_FCS_LEN)
#define ETHSW_L2_MAX_FRM(mtu)	((mtu) + VLAN_ETH_HLEN + ETH_FCS_LEN)
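
/*
 * Worked example: with VLAN_ETH_HLEN == 18 and ETH_FCS_LEN == 4, a standard
 * 1500-byte MTU corresponds to ETHSW_L2_MAX_FRM(1500) == 1522 bytes on the
 * wire, and the largest MTU the 10 KiB hardware limit allows is
 * ETHSW_MAX_FRAME_LENGTH == 10240 - 22 == 10218 bytes.
 */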

#define ETHSW_FEATURE_MAC_ADDR	BIT(0)

/* Number of receive queues (one RX and one TX_CONF) */
#define DPAA2_SWITCH_RX_NUM_FQS	2

/* Hardware requires alignment for ingress/egress buffer addresses */
#define DPAA2_SWITCH_RX_BUF_RAW_SIZE	PAGE_SIZE
#define DPAA2_SWITCH_RX_BUF_TAILROOM \
	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
#define DPAA2_SWITCH_RX_BUF_SIZE \
	(DPAA2_SWITCH_RX_BUF_RAW_SIZE - DPAA2_SWITCH_RX_BUF_TAILROOM)
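
/*
 * For illustration, assuming 4 KiB pages: each Rx buffer is one page, with
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) (a few hundred bytes,
 * depending on kernel configuration) reserved as tailroom so the buffer can
 * later carry the skb_shared_info of an skb built around it; the remainder
 * is what the hardware may fill with frame data.
 */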

#define DPAA2_SWITCH_STORE_SIZE 16

/* Buffer management */
#define BUFS_PER_CMD			7
#define DPAA2_ETHSW_NUM_BUFS		(1024 * BUFS_PER_CMD)
#define DPAA2_ETHSW_REFILL_THRESH	(DPAA2_ETHSW_NUM_BUFS * 5 / 6)
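
/*
 * Worked example: buffers are released to hardware in batches of
 * BUFS_PER_CMD, so the pool is seeded with 1024 * 7 == 7168 buffers and is
 * refilled once the count drops below 7168 * 5 / 6 == 5973.
 */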

/* Number of times to retry DPIO portal operations while waiting
 * for portal to finish executing current command and become
 * available. We want to avoid being stuck in a while loop in case
 * hardware becomes unresponsive, but not give up too easily if
 * the portal really is busy for valid reasons
 */
#define DPAA2_SWITCH_SWP_BUSY_RETRIES	1000

/* Hardware annotation buffer size */
#define DPAA2_SWITCH_HWA_SIZE		64
/* Software annotation buffer size */
#define DPAA2_SWITCH_SWA_SIZE		64

#define DPAA2_SWITCH_TX_BUF_ALIGN	64

#define DPAA2_SWITCH_TX_DATA_OFFSET \
	(DPAA2_SWITCH_HWA_SIZE + DPAA2_SWITCH_SWA_SIZE)

#define DPAA2_SWITCH_NEEDED_HEADROOM \
	(DPAA2_SWITCH_TX_DATA_OFFSET + DPAA2_SWITCH_TX_BUF_ALIGN)
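
/*
 * For illustration: Tx frame data starts 64 + 64 == 128 bytes into the
 * buffer (hardware plus software annotation areas), and with a further 64
 * bytes kept for alignment the driver typically advertises 192 bytes of
 * needed headroom in front of every transmitted frame.
 */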

#define DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES	16
#define DPAA2_ETHSW_PORT_DEFAULT_TRAPS		1

#define DPAA2_ETHSW_PORT_ACL_CMD_BUF_SIZE	256

extern const struct ethtool_ops dpaa2_switch_port_ethtool_ops;

struct ethsw_core;

struct dpaa2_switch_fq {
	struct ethsw_core *ethsw;
	enum dpsw_queue_type type;
	struct dpaa2_io_store *store;
	struct dpaa2_io_notification_ctx nctx;
	struct napi_struct napi;
	u32 fqid;
};

struct dpaa2_switch_fdb {
	struct net_device *bridge_dev;
	u16 fdb_id;
	bool in_use;
};

struct dpaa2_switch_acl_entry {
	struct list_head list;
	u16 prio;
	unsigned long cookie;

	struct dpsw_acl_entry_cfg cfg;
	struct dpsw_acl_key key;
};

struct dpaa2_switch_mirror_entry {
	struct list_head list;
	struct dpsw_reflection_cfg cfg;
	unsigned long cookie;
	u16 if_id;
};

struct dpaa2_switch_filter_block {
	struct ethsw_core *ethsw;
	u64 ports;
	bool in_use;

	struct list_head acl_entries;
	u16 acl_id;
	u8 num_acl_rules;

	struct list_head mirror_entries;
};

static inline bool
dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_filter_block *filter_block)
{
	if ((filter_block->num_acl_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >=
	    DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES)
		return true;
	return false;
}
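
/*
 * Illustrative usage sketch: a caller inserting a new ACL rule would
 * typically check for free space first, since one slot is always reserved
 * for the default trap entry, e.g.
 *
 *	if (dpaa2_switch_acl_tbl_is_full(filter_block))
 *		return -ENOMEM;
 *	err = dpaa2_switch_acl_entry_add(filter_block, entry);
 */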

/* Per port private data */
struct ethsw_port_priv {
	struct net_device *netdev;
	u16 idx;
	struct ethsw_core *ethsw_data;
	u8 link_state;
	u8 stp_state;

	u8 vlans[VLAN_VID_MASK + 1];
	u16 pvid;
	u16 tx_qdid;

	struct dpaa2_switch_fdb *fdb;
	bool bcast_flood;
	bool ucast_flood;
	bool learn_ena;

	struct dpaa2_switch_filter_block *filter_block;
	struct dpaa2_mac *mac;
	/* Protects against changes to port_priv->mac */
	struct mutex mac_lock;
};

/* Switch data */
struct ethsw_core {
	struct device *dev;
	struct fsl_mc_io *mc_io;
	u16 dpsw_handle;
	struct dpsw_attr sw_attr;
	u16 major, minor;
	unsigned long features;
	int dev_id;
	struct ethsw_port_priv **ports;
	struct iommu_domain *iommu_domain;

	u8 vlans[VLAN_VID_MASK + 1];

	struct workqueue_struct *workqueue;

	struct dpaa2_switch_fq fq[DPAA2_SWITCH_RX_NUM_FQS];
	struct fsl_mc_device *dpbp_dev;
	int buf_count;
	u16 bpid;
	int napi_users;

	struct dpaa2_switch_fdb *fdbs;
	struct dpaa2_switch_filter_block *filter_blocks;
	u16 mirror_port;
};

static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw,
					 struct net_device *netdev)
{
	int i;

	for (i = 0; i < ethsw->sw_attr.num_ifs; i++)
		if (ethsw->ports[i]->netdev == netdev)
			return ethsw->ports[i]->idx;

	return -EINVAL;
}
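
/*
 * Illustrative usage sketch: switchdev and netdev notifier handlers can map
 * a net_device back to its switch interface index, e.g.
 *
 *	if (dpaa2_switch_port_dev_check(netdev))
 *		if_id = dpaa2_switch_get_index(ethsw, netdev);
 */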

static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw)
{
	if (ethsw->sw_attr.options & DPSW_OPT_CTRL_IF_DIS) {
		dev_err(ethsw->dev, "Control Interface is disabled, cannot probe\n");
		return false;
	}

	if (ethsw->sw_attr.flooding_cfg != DPSW_FLOODING_PER_FDB) {
		dev_err(ethsw->dev, "Flooding domain is not per FDB, cannot probe\n");
		return false;
	}

	if (ethsw->sw_attr.broadcast_cfg != DPSW_BROADCAST_PER_FDB) {
		dev_err(ethsw->dev, "Broadcast domain is not per FDB, cannot probe\n");
		return false;
	}

	if (ethsw->sw_attr.max_fdbs < ethsw->sw_attr.num_ifs) {
		dev_err(ethsw->dev, "The number of FDBs is lower than the number of ports, cannot probe\n");
		return false;
	}

	return true;
}

static inline bool
dpaa2_switch_port_is_type_phy(struct ethsw_port_priv *port_priv)
{
	return dpaa2_mac_is_type_phy(port_priv->mac);
}

static inline bool dpaa2_switch_port_has_mac(struct ethsw_port_priv *port_priv)
{
	return port_priv->mac ? true : false;
}

bool dpaa2_switch_port_dev_check(const struct net_device *netdev);

int dpaa2_switch_port_vlans_add(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan);

int dpaa2_switch_port_vlans_del(struct net_device *netdev,
				const struct switchdev_obj_port_vlan *vlan);

typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv,
				  struct fdb_dump_entry *fdb_entry,
				  void *data);

/* TC offload */

int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls);

int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block,
				    struct flow_cls_offload *cls);

int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls);

int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block,
				      struct tc_cls_matchall_offload *cls);

int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *block,
			       struct dpaa2_switch_acl_entry *entry);

int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block,
				      struct ethsw_port_priv *port_priv);

int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block,
					struct ethsw_port_priv *port_priv);
#endif /* __ETHSW_H */