1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | |
3 | #include "netlink.h" |
4 | #include "common.h" |
5 | |
/* Request info for RINGS_GET; carries no command-specific attributes,
 * only the common request header.
 */
struct rings_req_info {
	struct ethnl_req_info base;
};
9 | |
/* Reply data for RINGS_GET: ring sizes queried from the driver plus the
 * driver's advertised ETHTOOL_RING_USE_* capability bits.
 */
struct rings_reply_data {
	struct ethnl_reply_data base;
	struct ethtool_ringparam ringparam;		/* legacy ring limits/sizes */
	struct kernel_ethtool_ringparam kernel_ringparam; /* extended params */
	u32 supported_ring_params;			/* ETHTOOL_RING_USE_* flags */
};
16 | |
/* Convert the generic reply base pointer back to our reply data struct. */
#define RINGS_REPDATA(__reply_base) \
	container_of(__reply_base, struct rings_reply_data, base)
19 | |
/* RINGS_GET accepts only the common request header. */
const struct nla_policy ethnl_rings_get_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
};
24 | |
25 | static int rings_prepare_data(const struct ethnl_req_info *req_base, |
26 | struct ethnl_reply_data *reply_base, |
27 | const struct genl_info *info) |
28 | { |
29 | struct rings_reply_data *data = RINGS_REPDATA(reply_base); |
30 | struct net_device *dev = reply_base->dev; |
31 | int ret; |
32 | |
33 | if (!dev->ethtool_ops->get_ringparam) |
34 | return -EOPNOTSUPP; |
35 | |
36 | data->supported_ring_params = dev->ethtool_ops->supported_ring_params; |
37 | ret = ethnl_ops_begin(dev); |
38 | if (ret < 0) |
39 | return ret; |
40 | dev->ethtool_ops->get_ringparam(dev, &data->ringparam, |
41 | &data->kernel_ringparam, info->extack); |
42 | ethnl_ops_complete(dev); |
43 | |
44 | return 0; |
45 | } |
46 | |
47 | static int rings_reply_size(const struct ethnl_req_info *req_base, |
48 | const struct ethnl_reply_data *reply_base) |
49 | { |
50 | return nla_total_size(payload: sizeof(u32)) + /* _RINGS_RX_MAX */ |
51 | nla_total_size(payload: sizeof(u32)) + /* _RINGS_RX_MINI_MAX */ |
52 | nla_total_size(payload: sizeof(u32)) + /* _RINGS_RX_JUMBO_MAX */ |
53 | nla_total_size(payload: sizeof(u32)) + /* _RINGS_TX_MAX */ |
54 | nla_total_size(payload: sizeof(u32)) + /* _RINGS_RX */ |
55 | nla_total_size(payload: sizeof(u32)) + /* _RINGS_RX_MINI */ |
56 | nla_total_size(payload: sizeof(u32)) + /* _RINGS_RX_JUMBO */ |
57 | nla_total_size(payload: sizeof(u32)) + /* _RINGS_TX */ |
58 | nla_total_size(payload: sizeof(u32)) + /* _RINGS_RX_BUF_LEN */ |
59 | nla_total_size(payload: sizeof(u8)) + /* _RINGS_TCP_DATA_SPLIT */ |
60 | nla_total_size(payload: sizeof(u32) + /* _RINGS_CQE_SIZE */ |
61 | nla_total_size(payload: sizeof(u8)) + /* _RINGS_TX_PUSH */ |
62 | nla_total_size(payload: sizeof(u8))) + /* _RINGS_RX_PUSH */ |
63 | nla_total_size(payload: sizeof(u32)) + /* _RINGS_TX_PUSH_BUF_LEN */ |
64 | nla_total_size(payload: sizeof(u32)); /* _RINGS_TX_PUSH_BUF_LEN_MAX */ |
65 | } |
66 | |
67 | static int rings_fill_reply(struct sk_buff *skb, |
68 | const struct ethnl_req_info *req_base, |
69 | const struct ethnl_reply_data *reply_base) |
70 | { |
71 | const struct rings_reply_data *data = RINGS_REPDATA(reply_base); |
72 | const struct kernel_ethtool_ringparam *kr = &data->kernel_ringparam; |
73 | const struct ethtool_ringparam *ringparam = &data->ringparam; |
74 | u32 supported_ring_params = data->supported_ring_params; |
75 | |
76 | WARN_ON(kr->tcp_data_split > ETHTOOL_TCP_DATA_SPLIT_ENABLED); |
77 | |
78 | if ((ringparam->rx_max_pending && |
79 | (nla_put_u32(skb, attrtype: ETHTOOL_A_RINGS_RX_MAX, |
80 | value: ringparam->rx_max_pending) || |
81 | nla_put_u32(skb, attrtype: ETHTOOL_A_RINGS_RX, |
82 | value: ringparam->rx_pending))) || |
83 | (ringparam->rx_mini_max_pending && |
84 | (nla_put_u32(skb, attrtype: ETHTOOL_A_RINGS_RX_MINI_MAX, |
85 | value: ringparam->rx_mini_max_pending) || |
86 | nla_put_u32(skb, attrtype: ETHTOOL_A_RINGS_RX_MINI, |
87 | value: ringparam->rx_mini_pending))) || |
88 | (ringparam->rx_jumbo_max_pending && |
89 | (nla_put_u32(skb, attrtype: ETHTOOL_A_RINGS_RX_JUMBO_MAX, |
90 | value: ringparam->rx_jumbo_max_pending) || |
91 | nla_put_u32(skb, attrtype: ETHTOOL_A_RINGS_RX_JUMBO, |
92 | value: ringparam->rx_jumbo_pending))) || |
93 | (ringparam->tx_max_pending && |
94 | (nla_put_u32(skb, attrtype: ETHTOOL_A_RINGS_TX_MAX, |
95 | value: ringparam->tx_max_pending) || |
96 | nla_put_u32(skb, attrtype: ETHTOOL_A_RINGS_TX, |
97 | value: ringparam->tx_pending))) || |
98 | (kr->rx_buf_len && |
99 | (nla_put_u32(skb, attrtype: ETHTOOL_A_RINGS_RX_BUF_LEN, value: kr->rx_buf_len))) || |
100 | (kr->tcp_data_split && |
101 | (nla_put_u8(skb, attrtype: ETHTOOL_A_RINGS_TCP_DATA_SPLIT, |
102 | value: kr->tcp_data_split))) || |
103 | (kr->cqe_size && |
104 | (nla_put_u32(skb, attrtype: ETHTOOL_A_RINGS_CQE_SIZE, value: kr->cqe_size))) || |
105 | nla_put_u8(skb, attrtype: ETHTOOL_A_RINGS_TX_PUSH, value: !!kr->tx_push) || |
106 | nla_put_u8(skb, attrtype: ETHTOOL_A_RINGS_RX_PUSH, value: !!kr->rx_push) || |
107 | ((supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN) && |
108 | (nla_put_u32(skb, attrtype: ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX, |
109 | value: kr->tx_push_buf_max_len) || |
110 | nla_put_u32(skb, attrtype: ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN, |
111 | value: kr->tx_push_buf_len)))) |
112 | return -EMSGSIZE; |
113 | |
114 | return 0; |
115 | } |
116 | |
117 | /* RINGS_SET */ |
118 | |
/* RINGS_SET attribute policy: ring sizes are plain u32s (range-checked
 * against driver maxima in ethnl_set_rings()); buffer/CQE sizes must be
 * at least 1; push flags are booleans; TCP data split is capped at the
 * highest defined mode.
 */
const struct nla_policy ethnl_rings_set_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
	[ETHTOOL_A_RINGS_RX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_MINI]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_JUMBO]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_TX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_BUF_LEN]            = NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_TCP_DATA_SPLIT]	=
		NLA_POLICY_MAX(NLA_U8, ETHTOOL_TCP_DATA_SPLIT_ENABLED),
	[ETHTOOL_A_RINGS_CQE_SIZE]		= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_TX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_RINGS_RX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN]	= { .type = NLA_U32 },
};
134 | |
/* Validate a RINGS_SET request against the driver's advertised
 * capabilities before any state is touched.
 *
 * Each optional attribute may only be present if the driver sets the
 * matching ETHTOOL_RING_USE_* bit; the first unsupported attribute is
 * reported via extack and -EOPNOTSUPP returned.  On success returns 1
 * ("proceed with set"), or -EOPNOTSUPP if the driver lacks the
 * get_ringparam/set_ringparam callbacks.
 */
static int
ethnl_set_rings_validate(struct ethnl_req_info *req_info,
			 struct genl_info *info)
{
	const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
	struct nlattr **tb = info->attrs;

	if (tb[ETHTOOL_A_RINGS_RX_BUF_LEN] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
				    "setting rx buf len not supported" );
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TCP_DATA_SPLIT)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT],
				    "setting TCP data split is not supported" );
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_CQE_SIZE] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_CQE_SIZE],
				    "setting cqe size not supported" );
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH],
				    "setting tx push not supported" );
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_RX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_PUSH],
				    "setting rx push not supported" );
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN],
				    "setting tx push buf len is not supported" );
		return -EOPNOTSUPP;
	}

	/* 1 = continue with ethnl_set_rings(); both callbacks are required */
	return ops->get_ringparam && ops->set_ringparam ? 1 : -EOPNOTSUPP;
}
192 | |
193 | static int |
194 | ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info) |
195 | { |
196 | struct kernel_ethtool_ringparam kernel_ringparam = {}; |
197 | struct ethtool_ringparam ringparam = {}; |
198 | struct net_device *dev = req_info->dev; |
199 | struct nlattr **tb = info->attrs; |
200 | const struct nlattr *err_attr; |
201 | bool mod = false; |
202 | int ret; |
203 | |
204 | dev->ethtool_ops->get_ringparam(dev, &ringparam, |
205 | &kernel_ringparam, info->extack); |
206 | |
207 | ethnl_update_u32(dst: &ringparam.rx_pending, attr: tb[ETHTOOL_A_RINGS_RX], mod: &mod); |
208 | ethnl_update_u32(dst: &ringparam.rx_mini_pending, |
209 | attr: tb[ETHTOOL_A_RINGS_RX_MINI], mod: &mod); |
210 | ethnl_update_u32(dst: &ringparam.rx_jumbo_pending, |
211 | attr: tb[ETHTOOL_A_RINGS_RX_JUMBO], mod: &mod); |
212 | ethnl_update_u32(dst: &ringparam.tx_pending, attr: tb[ETHTOOL_A_RINGS_TX], mod: &mod); |
213 | ethnl_update_u32(dst: &kernel_ringparam.rx_buf_len, |
214 | attr: tb[ETHTOOL_A_RINGS_RX_BUF_LEN], mod: &mod); |
215 | ethnl_update_u8(dst: &kernel_ringparam.tcp_data_split, |
216 | attr: tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT], mod: &mod); |
217 | ethnl_update_u32(dst: &kernel_ringparam.cqe_size, |
218 | attr: tb[ETHTOOL_A_RINGS_CQE_SIZE], mod: &mod); |
219 | ethnl_update_u8(dst: &kernel_ringparam.tx_push, |
220 | attr: tb[ETHTOOL_A_RINGS_TX_PUSH], mod: &mod); |
221 | ethnl_update_u8(dst: &kernel_ringparam.rx_push, |
222 | attr: tb[ETHTOOL_A_RINGS_RX_PUSH], mod: &mod); |
223 | ethnl_update_u32(dst: &kernel_ringparam.tx_push_buf_len, |
224 | attr: tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN], mod: &mod); |
225 | if (!mod) |
226 | return 0; |
227 | |
228 | /* ensure new ring parameters are within limits */ |
229 | if (ringparam.rx_pending > ringparam.rx_max_pending) |
230 | err_attr = tb[ETHTOOL_A_RINGS_RX]; |
231 | else if (ringparam.rx_mini_pending > ringparam.rx_mini_max_pending) |
232 | err_attr = tb[ETHTOOL_A_RINGS_RX_MINI]; |
233 | else if (ringparam.rx_jumbo_pending > ringparam.rx_jumbo_max_pending) |
234 | err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO]; |
235 | else if (ringparam.tx_pending > ringparam.tx_max_pending) |
236 | err_attr = tb[ETHTOOL_A_RINGS_TX]; |
237 | else |
238 | err_attr = NULL; |
239 | if (err_attr) { |
240 | NL_SET_ERR_MSG_ATTR(info->extack, err_attr, |
241 | "requested ring size exceeds maximum" ); |
242 | return -EINVAL; |
243 | } |
244 | |
245 | if (kernel_ringparam.tx_push_buf_len > kernel_ringparam.tx_push_buf_max_len) { |
246 | NL_SET_ERR_MSG_ATTR_FMT(info->extack, tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN], |
247 | "Requested TX push buffer exceeds the maximum of %u" , |
248 | kernel_ringparam.tx_push_buf_max_len); |
249 | |
250 | return -EINVAL; |
251 | } |
252 | |
253 | ret = dev->ethtool_ops->set_ringparam(dev, &ringparam, |
254 | &kernel_ringparam, info->extack); |
255 | return ret < 0 ? ret : 1; |
256 | } |
257 | |
/* Glue for the generic ethnl request machinery: RINGS_GET/RINGS_SET
 * commands and the notification emitted after a successful set.
 */
const struct ethnl_request_ops ethnl_rings_request_ops = {
	.request_cmd		= ETHTOOL_MSG_RINGS_GET,
	.reply_cmd		= ETHTOOL_MSG_RINGS_GET_REPLY,
	.hdr_attr		= ETHTOOL_A_RINGS_HEADER,
	.req_info_size		= sizeof(struct rings_req_info),
	.reply_data_size	= sizeof(struct rings_reply_data),

	.prepare_data		= rings_prepare_data,
	.reply_size		= rings_reply_size,
	.fill_reply		= rings_fill_reply,

	.set_validate		= ethnl_set_rings_validate,
	.set			= ethnl_set_rings,
	.set_ntf_cmd		= ETHTOOL_MSG_RINGS_NTF,
};
273 | |