1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Networking over Thunderbolt/USB4 cables using USB4NET protocol |
4 | * (formerly Apple ThunderboltIP). |
5 | * |
6 | * Copyright (C) 2017, Intel Corporation |
7 | * Authors: Amir Levy <amir.jer.levy@intel.com> |
8 | * Michael Jamet <michael.jamet@intel.com> |
9 | * Mika Westerberg <mika.westerberg@linux.intel.com> |
10 | */ |
11 | |
12 | #include <linux/atomic.h> |
13 | #include <linux/highmem.h> |
14 | #include <linux/if_vlan.h> |
15 | #include <linux/jhash.h> |
16 | #include <linux/module.h> |
17 | #include <linux/etherdevice.h> |
18 | #include <linux/rtnetlink.h> |
19 | #include <linux/sizes.h> |
20 | #include <linux/thunderbolt.h> |
21 | #include <linux/uuid.h> |
22 | #include <linux/workqueue.h> |
23 | |
24 | #include <net/ip6_checksum.h> |
25 | |
26 | #include "trace.h" |
27 | |
28 | /* Protocol timeouts in ms */ |
29 | #define TBNET_LOGIN_DELAY 4500 |
30 | #define TBNET_LOGIN_TIMEOUT 500 |
31 | #define TBNET_LOGOUT_TIMEOUT 1000 |
32 | |
33 | #define TBNET_RING_SIZE 256 |
34 | #define TBNET_LOGIN_RETRIES 60 |
35 | #define TBNET_LOGOUT_RETRIES 10 |
36 | #define TBNET_E2E BIT(0) |
37 | #define TBNET_MATCH_FRAGS_ID BIT(1) |
38 | #define TBNET_64K_FRAMES BIT(2) |
39 | #define TBNET_MAX_MTU SZ_64K |
40 | #define TBNET_FRAME_SIZE SZ_4K |
41 | #define TBNET_MAX_PAYLOAD_SIZE \ |
42 | (TBNET_FRAME_SIZE - sizeof(struct thunderbolt_ip_frame_header)) |
43 | /* Rx packets need to hold space for skb_shared_info */ |
44 | #define TBNET_RX_MAX_SIZE \ |
45 | (TBNET_FRAME_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) |
46 | #define TBNET_RX_PAGE_ORDER get_order(TBNET_RX_MAX_SIZE) |
47 | #define TBNET_RX_PAGE_SIZE (PAGE_SIZE << TBNET_RX_PAGE_ORDER) |
48 | |
49 | #define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0)) |
50 | |
51 | /** |
52 | * struct thunderbolt_ip_frame_header - Header for each Thunderbolt frame |
 * @frame_size: size of the data in the frame
54 | * @frame_index: running index on the frames |
55 | * @frame_id: ID of the frame to match frames to specific packet |
 * @frame_count: how many frames assemble a full packet
57 | * |
58 | * Each data frame passed to the high-speed DMA ring has this header. If |
59 | * the XDomain network directory announces that %TBNET_MATCH_FRAGS_ID is |
60 | * supported then @frame_id is filled, otherwise it stays %0. |
61 | */ |
struct thunderbolt_ip_frame_header {
	__le32 frame_size;
	__le16 frame_index;
	__le16 frame_id;
	__le32 frame_count;
};
68 | |
69 | enum thunderbolt_ip_frame_pdf { |
70 | TBIP_PDF_FRAME_START = 1, |
71 | TBIP_PDF_FRAME_END, |
72 | }; |
73 | |
74 | enum thunderbolt_ip_type { |
75 | TBIP_LOGIN, |
76 | TBIP_LOGIN_RESPONSE, |
77 | TBIP_LOGOUT, |
78 | TBIP_STATUS, |
79 | }; |
80 | |
struct thunderbolt_ip_header {
	u32 route_hi;
	u32 route_lo;
	u32 length_sn;
	uuid_t uuid;
	uuid_t initiator_uuid;
	uuid_t target_uuid;
	u32 type;
	u32 command_id;
};
91 | |
92 | #define TBIP_HDR_LENGTH_MASK GENMASK(5, 0) |
93 | #define TBIP_HDR_SN_MASK GENMASK(28, 27) |
94 | #define TBIP_HDR_SN_SHIFT 27 |
95 | |
96 | struct thunderbolt_ip_login { |
97 | struct thunderbolt_ip_header hdr; |
98 | u32 proto_version; |
99 | u32 transmit_path; |
100 | u32 reserved[4]; |
101 | }; |
102 | |
103 | #define TBIP_LOGIN_PROTO_VERSION 1 |
104 | |
105 | struct thunderbolt_ip_login_response { |
106 | struct thunderbolt_ip_header hdr; |
107 | u32 status; |
108 | u32 receiver_mac[2]; |
109 | u32 receiver_mac_len; |
110 | u32 reserved[4]; |
111 | }; |
112 | |
113 | struct thunderbolt_ip_logout { |
114 | struct thunderbolt_ip_header hdr; |
115 | }; |
116 | |
117 | struct thunderbolt_ip_status { |
118 | struct thunderbolt_ip_header hdr; |
119 | u32 status; |
120 | }; |
121 | |
122 | struct tbnet_stats { |
123 | u64 tx_packets; |
124 | u64 rx_packets; |
125 | u64 tx_bytes; |
126 | u64 rx_bytes; |
127 | u64 rx_errors; |
128 | u64 tx_errors; |
129 | u64 rx_length_errors; |
130 | u64 rx_over_errors; |
131 | u64 rx_crc_errors; |
132 | u64 rx_missed_errors; |
133 | }; |
134 | |
135 | struct tbnet_frame { |
136 | struct net_device *dev; |
137 | struct page *page; |
138 | struct ring_frame frame; |
139 | }; |
140 | |
141 | struct tbnet_ring { |
142 | struct tbnet_frame frames[TBNET_RING_SIZE]; |
143 | unsigned int cons; |
144 | unsigned int prod; |
145 | struct tb_ring *ring; |
146 | }; |
147 | |
148 | /** |
149 | * struct tbnet - ThunderboltIP network driver private data |
150 | * @svc: XDomain service the driver is bound to |
151 | * @xd: XDomain the service belongs to |
152 | * @handler: ThunderboltIP configuration protocol handler |
153 | * @dev: Networking device |
154 | * @napi: NAPI structure for Rx polling |
155 | * @stats: Network statistics |
156 | * @skb: Network packet that is currently processed on Rx path |
157 | * @command_id: ID used for next configuration protocol packet |
158 | * @login_sent: ThunderboltIP login message successfully sent |
159 | * @login_received: ThunderboltIP login message received from the remote |
160 | * host |
161 | * @local_transmit_path: HopID we are using to send out packets |
162 | * @remote_transmit_path: HopID the other end is using to send packets to us |
 * @connection_lock: Lock serializing access to @login_sent,
 *		     @login_received and @remote_transmit_path.
165 | * @login_retries: Number of login retries currently done |
166 | * @login_work: Worker to send ThunderboltIP login packets |
167 | * @connected_work: Worker that finalizes the ThunderboltIP connection |
168 | * setup and enables DMA paths for high speed data |
169 | * transfers |
170 | * @disconnect_work: Worker that handles tearing down the ThunderboltIP |
171 | * connection |
 * @rx_hdr: Copy of the header of the currently processed Rx frame. Used
 *	    when a network packet consists of multiple Thunderbolt frames.
 *	    The fields are kept in little-endian (on-wire) byte order.
175 | * @rx_ring: Software ring holding Rx frames |
 * @frame_id: Frame ID used for the next Tx packet
177 | * (if %TBNET_MATCH_FRAGS_ID is supported in both ends) |
178 | * @tx_ring: Software ring holding Tx frames |
179 | */ |
180 | struct tbnet { |
181 | const struct tb_service *svc; |
182 | struct tb_xdomain *xd; |
183 | struct tb_protocol_handler handler; |
184 | struct net_device *dev; |
185 | struct napi_struct napi; |
186 | struct tbnet_stats stats; |
187 | struct sk_buff *skb; |
188 | atomic_t command_id; |
189 | bool login_sent; |
190 | bool login_received; |
191 | int local_transmit_path; |
192 | int remote_transmit_path; |
193 | struct mutex connection_lock; |
194 | int login_retries; |
195 | struct delayed_work login_work; |
196 | struct work_struct connected_work; |
197 | struct work_struct disconnect_work; |
198 | struct thunderbolt_ip_frame_header rx_hdr; |
199 | struct tbnet_ring rx_ring; |
200 | atomic_t frame_id; |
201 | struct tbnet_ring tx_ring; |
202 | }; |
203 | |
204 | /* Network property directory UUID: c66189ca-1cce-4195-bdb8-49592e5f5a4f */ |
205 | static const uuid_t tbnet_dir_uuid = |
206 | UUID_INIT(0xc66189ca, 0x1cce, 0x4195, |
207 | 0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f); |
208 | |
209 | /* ThunderboltIP protocol UUID: 798f589e-3616-8a47-97c6-5664a920c8dd */ |
210 | static const uuid_t tbnet_svc_uuid = |
211 | UUID_INIT(0x798f589e, 0x3616, 0x8a47, |
212 | 0x97, 0xc6, 0x56, 0x64, 0xa9, 0x20, 0xc8, 0xdd); |
213 | |
214 | static struct tb_property_dir *tbnet_dir; |
215 | |
216 | static bool tbnet_e2e = true; |
217 | module_param_named(e2e, tbnet_e2e, bool, 0444); |
MODULE_PARM_DESC(e2e, "USB4NET full end-to-end flow control (default: true)");
219 | |
static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
			      u8 sequence, const uuid_t *initiator_uuid,
			      const uuid_t *target_uuid,
			      enum thunderbolt_ip_type type, size_t size,
			      u32 command_id)
{
	u32 length_sn;

	/* Length does not include route_hi/lo and length_sn fields */
	length_sn = (size - 3 * 4) / 4;
	length_sn |= (sequence << TBIP_HDR_SN_SHIFT) & TBIP_HDR_SN_MASK;

	hdr->route_hi = upper_32_bits(route);
	hdr->route_lo = lower_32_bits(route);
	hdr->length_sn = length_sn;
	uuid_copy(&hdr->uuid, &tbnet_svc_uuid);
	uuid_copy(&hdr->initiator_uuid, initiator_uuid);
	uuid_copy(&hdr->target_uuid, target_uuid);
	hdr->type = type;
	hdr->command_id = command_id;
}
239 | |
240 | static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence, |
241 | u32 command_id) |
242 | { |
243 | struct thunderbolt_ip_login_response reply; |
244 | struct tb_xdomain *xd = net->xd; |
245 | |
246 | memset(&reply, 0, sizeof(reply)); |
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply),
			  command_id);
250 | memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN); |
251 | reply.receiver_mac_len = ETH_ALEN; |
252 | |
	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
255 | } |
256 | |
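/*
 * Sends a ThunderboltIP login request that advertises our local transmit
 * HopID and waits up to TBNET_LOGIN_TIMEOUT ms for the remote host to
 * answer over the XDomain control channel.
 */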
257 | static int tbnet_login_request(struct tbnet *net, u8 sequence) |
258 | { |
259 | struct thunderbolt_ip_login_response reply; |
260 | struct thunderbolt_ip_login request; |
261 | struct tb_xdomain *xd = net->xd; |
262 | |
263 | memset(&request, 0, sizeof(request)); |
	tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGIN, sizeof(request),
			  atomic_inc_return(&net->command_id));
267 | |
268 | request.proto_version = TBIP_LOGIN_PROTO_VERSION; |
269 | request.transmit_path = net->local_transmit_path; |
270 | |
	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGIN_TIMEOUT);
275 | } |
276 | |
277 | static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence, |
278 | u32 command_id) |
279 | { |
280 | struct thunderbolt_ip_status reply; |
281 | struct tb_xdomain *xd = net->xd; |
282 | |
283 | memset(&reply, 0, sizeof(reply)); |
	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
			  xd->remote_uuid, TBIP_STATUS, sizeof(reply),
			  atomic_inc_return(&net->command_id));
	return tb_xdomain_response(xd, &reply, sizeof(reply),
				   TB_CFG_PKG_XDOMAIN_RESP);
289 | } |
290 | |
291 | static int tbnet_logout_request(struct tbnet *net) |
292 | { |
293 | struct thunderbolt_ip_logout request; |
294 | struct thunderbolt_ip_status reply; |
295 | struct tb_xdomain *xd = net->xd; |
296 | |
297 | memset(&request, 0, sizeof(request)); |
	tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid,
			  xd->remote_uuid, TBIP_LOGOUT, sizeof(request),
			  atomic_inc_return(&net->command_id));

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
				  TBNET_LOGOUT_TIMEOUT);
306 | } |
307 | |
308 | static void start_login(struct tbnet *net) |
309 | { |
	netdev_dbg(net->dev, "login started\n");

	mutex_lock(&net->connection_lock);
	net->login_sent = false;
	net->login_received = false;
	mutex_unlock(&net->connection_lock);

	queue_delayed_work(system_long_wq, &net->login_work,
			   msecs_to_jiffies(1000));
319 | } |
320 | |
321 | static void stop_login(struct tbnet *net) |
322 | { |
	cancel_delayed_work_sync(&net->login_work);
	cancel_work_sync(&net->connected_work);

	netdev_dbg(net->dev, "login stopped\n");
327 | } |
328 | |
329 | static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf) |
330 | { |
331 | return tf->frame.size ? : TBNET_FRAME_SIZE; |
332 | } |
333 | |
334 | static void tbnet_free_buffers(struct tbnet_ring *ring) |
335 | { |
336 | unsigned int i; |
337 | |
338 | for (i = 0; i < TBNET_RING_SIZE; i++) { |
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
340 | struct tbnet_frame *tf = &ring->frames[i]; |
341 | enum dma_data_direction dir; |
342 | unsigned int order; |
343 | size_t size; |
344 | |
345 | if (!tf->page) |
346 | continue; |
347 | |
348 | if (ring->ring->is_tx) { |
349 | dir = DMA_TO_DEVICE; |
350 | order = 0; |
351 | size = TBNET_FRAME_SIZE; |
352 | } else { |
353 | dir = DMA_FROM_DEVICE; |
354 | order = TBNET_RX_PAGE_ORDER; |
355 | size = TBNET_RX_PAGE_SIZE; |
356 | } |
357 | |
		trace_tbnet_free_frame(i, tf->page, tf->frame.buffer_phy, dir);
359 | |
360 | if (tf->frame.buffer_phy) |
361 | dma_unmap_page(dma_dev, tf->frame.buffer_phy, size, |
362 | dir); |
363 | |
		__free_pages(tf->page, order);
365 | tf->page = NULL; |
366 | } |
367 | |
368 | ring->cons = 0; |
369 | ring->prod = 0; |
370 | } |
371 | |
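/*
 * Tears down an established connection: optionally sends a logout request
 * to the remote host, stops both DMA rings, releases their buffers,
 * disables the XDomain DMA paths and resets the login state so that a new
 * login round can be started later.
 */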
372 | static void tbnet_tear_down(struct tbnet *net, bool send_logout) |
373 | { |
	netif_carrier_off(net->dev);
	netif_stop_queue(net->dev);
376 | |
377 | stop_login(net); |
378 | |
379 | mutex_lock(&net->connection_lock); |
380 | |
381 | if (net->login_sent && net->login_received) { |
382 | int ret, retries = TBNET_LOGOUT_RETRIES; |
383 | |
384 | while (send_logout && retries-- > 0) { |
			netdev_dbg(net->dev, "sending logout request %u\n",
				   retries);
387 | ret = tbnet_logout_request(net); |
388 | if (ret != -ETIMEDOUT) |
389 | break; |
390 | } |
391 | |
		tb_ring_stop(net->rx_ring.ring);
		tb_ring_stop(net->tx_ring.ring);
		tbnet_free_buffers(&net->rx_ring);
		tbnet_free_buffers(&net->tx_ring);

		ret = tb_xdomain_disable_paths(net->xd,
					       net->local_transmit_path,
					       net->rx_ring.ring->hop,
					       net->remote_transmit_path,
					       net->tx_ring.ring->hop);
		if (ret)
			netdev_warn(net->dev, "failed to disable DMA paths\n");

		tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
406 | net->remote_transmit_path = 0; |
407 | } |
408 | |
409 | net->login_retries = 0; |
410 | net->login_sent = false; |
411 | net->login_received = false; |
412 | |
	netdev_dbg(net->dev, "network traffic stopped\n");

	mutex_unlock(&net->connection_lock);
416 | } |
417 | |
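/*
 * XDomain protocol handler callback. Verifies that an incoming control
 * packet is a ThunderboltIP message addressed to this host and handles
 * remote login and logout requests. Returns 1 when the packet was
 * consumed, 0 otherwise.
 */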
418 | static int tbnet_handle_packet(const void *buf, size_t size, void *data) |
419 | { |
420 | const struct thunderbolt_ip_login *pkg = buf; |
421 | struct tbnet *net = data; |
422 | u32 command_id; |
423 | int ret = 0; |
424 | u32 sequence; |
425 | u64 route; |
426 | |
427 | /* Make sure the packet is for us */ |
428 | if (size < sizeof(struct thunderbolt_ip_header)) |
429 | return 0; |
	if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid))
		return 0;
	if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid))
433 | return 0; |
434 | |
435 | route = ((u64)pkg->hdr.route_hi << 32) | pkg->hdr.route_lo; |
436 | route &= ~BIT_ULL(63); |
437 | if (route != net->xd->route) |
438 | return 0; |
439 | |
440 | sequence = pkg->hdr.length_sn & TBIP_HDR_SN_MASK; |
441 | sequence >>= TBIP_HDR_SN_SHIFT; |
442 | command_id = pkg->hdr.command_id; |
443 | |
444 | switch (pkg->hdr.type) { |
445 | case TBIP_LOGIN: |
		netdev_dbg(net->dev, "remote login request received\n");
		if (!netif_running(net->dev))
448 | break; |
449 | |
		ret = tbnet_login_response(net, route, sequence,
					   pkg->hdr.command_id);
		if (!ret) {
			netdev_dbg(net->dev, "remote login response sent\n");
454 | |
455 | mutex_lock(&net->connection_lock); |
456 | net->login_received = true; |
457 | net->remote_transmit_path = pkg->transmit_path; |
458 | |
459 | /* If we reached the number of max retries or |
460 | * previous logout, schedule another round of |
461 | * login retries |
462 | */ |
463 | if (net->login_retries >= TBNET_LOGIN_RETRIES || |
464 | !net->login_sent) { |
465 | net->login_retries = 0; |
				queue_delayed_work(system_long_wq,
						   &net->login_work, 0);
			}
			mutex_unlock(&net->connection_lock);

			queue_work(system_long_wq, &net->connected_work);
472 | } |
473 | break; |
474 | |
475 | case TBIP_LOGOUT: |
		netdev_dbg(net->dev, "remote logout request received\n");
		ret = tbnet_logout_response(net, route, sequence, command_id);
		if (!ret) {
			netdev_dbg(net->dev, "remote logout response sent\n");
			queue_work(system_long_wq, &net->disconnect_work);
481 | } |
482 | break; |
483 | |
484 | default: |
485 | return 0; |
486 | } |
487 | |
488 | if (ret) |
		netdev_warn(net->dev, "failed to send ThunderboltIP response\n");
490 | |
491 | return 1; |
492 | } |
493 | |
494 | static unsigned int tbnet_available_buffers(const struct tbnet_ring *ring) |
495 | { |
496 | return ring->prod - ring->cons; |
497 | } |
498 | |
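/*
 * Allocates up to @nbuffers Rx pages, DMA maps them and queues them to the
 * Rx DMA ring. On allocation or mapping failure all Rx buffers are
 * released again.
 */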
499 | static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers) |
500 | { |
501 | struct tbnet_ring *ring = &net->rx_ring; |
502 | int ret; |
503 | |
504 | while (nbuffers--) { |
		struct device *dma_dev = tb_ring_dma_device(ring->ring);
506 | unsigned int index = ring->prod & (TBNET_RING_SIZE - 1); |
507 | struct tbnet_frame *tf = &ring->frames[index]; |
508 | dma_addr_t dma_addr; |
509 | |
510 | if (tf->page) |
511 | break; |
512 | |
513 | /* Allocate page (order > 0) so that it can hold maximum |
514 | * ThunderboltIP frame (4kB) and the additional room for |
515 | * SKB shared info required by build_skb(). |
516 | */ |
517 | tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER); |
518 | if (!tf->page) { |
519 | ret = -ENOMEM; |
520 | goto err_free; |
521 | } |
522 | |
523 | dma_addr = dma_map_page(dma_dev, tf->page, 0, |
524 | TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE); |
		if (dma_mapping_error(dma_dev, dma_addr)) {
526 | ret = -ENOMEM; |
527 | goto err_free; |
528 | } |
529 | |
530 | tf->frame.buffer_phy = dma_addr; |
531 | tf->dev = net->dev; |
532 | |
		trace_tbnet_alloc_rx_frame(index, tf->page, dma_addr,
					   DMA_FROM_DEVICE);

		tb_ring_rx(ring->ring, &tf->frame);
537 | |
538 | ring->prod++; |
539 | } |
540 | |
541 | return 0; |
542 | |
543 | err_free: |
544 | tbnet_free_buffers(ring); |
545 | return ret; |
546 | } |
547 | |
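/*
 * Takes the next free Tx frame from the software Tx ring and syncs its
 * buffer for CPU access so the caller can copy packet data into it.
 * Returns NULL if no Tx buffers are available.
 */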
548 | static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net) |
549 | { |
550 | struct tbnet_ring *ring = &net->tx_ring; |
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
552 | struct tbnet_frame *tf; |
553 | unsigned int index; |
554 | |
555 | if (!tbnet_available_buffers(ring)) |
556 | return NULL; |
557 | |
558 | index = ring->cons++ & (TBNET_RING_SIZE - 1); |
559 | |
560 | tf = &ring->frames[index]; |
561 | tf->frame.size = 0; |
562 | |
	dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
				tbnet_frame_size(tf), DMA_TO_DEVICE);
565 | |
566 | return tf; |
567 | } |
568 | |
569 | static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame, |
570 | bool canceled) |
571 | { |
572 | struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame); |
	struct tbnet *net = netdev_priv(tf->dev);
574 | |
575 | /* Return buffer to the ring */ |
576 | net->tx_ring.prod++; |
577 | |
	if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
		netif_wake_queue(net->dev);
580 | } |
581 | |
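/*
 * Allocates and DMA maps one page per Tx ring entry and initializes the
 * frame descriptors (completion callback and start/end of frame PDFs).
 * The Tx buffers stay mapped until the connection is torn down.
 */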
582 | static int tbnet_alloc_tx_buffers(struct tbnet *net) |
583 | { |
584 | struct tbnet_ring *ring = &net->tx_ring; |
	struct device *dma_dev = tb_ring_dma_device(ring->ring);
586 | unsigned int i; |
587 | |
588 | for (i = 0; i < TBNET_RING_SIZE; i++) { |
589 | struct tbnet_frame *tf = &ring->frames[i]; |
590 | dma_addr_t dma_addr; |
591 | |
592 | tf->page = alloc_page(GFP_KERNEL); |
593 | if (!tf->page) { |
594 | tbnet_free_buffers(ring); |
595 | return -ENOMEM; |
596 | } |
597 | |
598 | dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE, |
599 | DMA_TO_DEVICE); |
		if (dma_mapping_error(dma_dev, dma_addr)) {
601 | __free_page(tf->page); |
602 | tf->page = NULL; |
603 | tbnet_free_buffers(ring); |
604 | return -ENOMEM; |
605 | } |
606 | |
607 | tf->dev = net->dev; |
608 | tf->frame.buffer_phy = dma_addr; |
609 | tf->frame.callback = tbnet_tx_callback; |
610 | tf->frame.sof = TBIP_PDF_FRAME_START; |
611 | tf->frame.eof = TBIP_PDF_FRAME_END; |
612 | |
		trace_tbnet_alloc_tx_frame(i, tf->page, dma_addr, DMA_TO_DEVICE);
614 | } |
615 | |
616 | ring->cons = 0; |
617 | ring->prod = TBNET_RING_SIZE - 1; |
618 | |
619 | return 0; |
620 | } |
621 | |
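/*
 * Runs once both login directions have completed. Allocates the Rx HopID
 * announced by the remote host, primes the Rx and Tx rings with buffers,
 * enables the high-speed DMA paths and finally starts the netdev queue.
 */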
622 | static void tbnet_connected_work(struct work_struct *work) |
623 | { |
624 | struct tbnet *net = container_of(work, typeof(*net), connected_work); |
625 | bool connected; |
626 | int ret; |
627 | |
	if (netif_carrier_ok(net->dev))
629 | return; |
630 | |
631 | mutex_lock(&net->connection_lock); |
632 | connected = net->login_sent && net->login_received; |
	mutex_unlock(&net->connection_lock);
634 | |
635 | if (!connected) |
636 | return; |
637 | |
	netdev_dbg(net->dev, "login successful, enabling paths\n");

	ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path);
	if (ret != net->remote_transmit_path) {
		netdev_err(net->dev, "failed to allocate Rx HopID\n");
643 | return; |
644 | } |
645 | |
646 | /* Both logins successful so enable the rings, high-speed DMA |
647 | * paths and start the network device queue. |
648 | * |
649 | * Note we enable the DMA paths last to make sure we have primed |
650 | * the Rx ring before any incoming packets are allowed to |
651 | * arrive. |
652 | */ |
	tb_ring_start(net->tx_ring.ring);
	tb_ring_start(net->rx_ring.ring);
655 | |
656 | ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE); |
657 | if (ret) |
658 | goto err_stop_rings; |
659 | |
660 | ret = tbnet_alloc_tx_buffers(net); |
661 | if (ret) |
662 | goto err_free_rx_buffers; |
663 | |
	ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
				      net->rx_ring.ring->hop,
				      net->remote_transmit_path,
				      net->tx_ring.ring->hop);
	if (ret) {
		netdev_err(net->dev, "failed to enable DMA paths\n");
670 | goto err_free_tx_buffers; |
671 | } |
672 | |
	netif_carrier_on(net->dev);
	netif_start_queue(net->dev);

	netdev_dbg(net->dev, "network traffic started\n");
677 | return; |
678 | |
679 | err_free_tx_buffers: |
	tbnet_free_buffers(&net->tx_ring);
err_free_rx_buffers:
	tbnet_free_buffers(&net->rx_ring);
err_stop_rings:
	tb_ring_stop(net->rx_ring.ring);
	tb_ring_stop(net->tx_ring.ring);
	tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
687 | } |
688 | |
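/*
 * Delayed work that keeps sending login requests until the remote host
 * replies or TBNET_LOGIN_RETRIES attempts have been made. On success the
 * login is marked as sent and the connected work is scheduled.
 */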
689 | static void tbnet_login_work(struct work_struct *work) |
690 | { |
691 | struct tbnet *net = container_of(work, typeof(*net), login_work.work); |
692 | unsigned long delay = msecs_to_jiffies(TBNET_LOGIN_DELAY); |
693 | int ret; |
694 | |
	if (netif_carrier_ok(net->dev))
696 | return; |
697 | |
	netdev_dbg(net->dev, "sending login request, retries=%u\n",
		   net->login_retries);
700 | |
	ret = tbnet_login_request(net, net->login_retries % 4);
	if (ret) {
		netdev_dbg(net->dev, "sending login request failed, ret=%d\n",
			   ret);
		if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
			queue_delayed_work(system_long_wq, &net->login_work,
					   delay);
		} else {
			netdev_info(net->dev, "ThunderboltIP login timed out\n");
710 | } |
711 | } else { |
		netdev_dbg(net->dev, "received login reply\n");
713 | |
714 | net->login_retries = 0; |
715 | |
716 | mutex_lock(&net->connection_lock); |
717 | net->login_sent = true; |
		mutex_unlock(&net->connection_lock);

		queue_work(system_long_wq, &net->connected_work);
721 | } |
722 | } |
723 | |
724 | static void tbnet_disconnect_work(struct work_struct *work) |
725 | { |
726 | struct tbnet *net = container_of(work, typeof(*net), disconnect_work); |
727 | |
	tbnet_tear_down(net, false);
729 | } |
730 | |
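/*
 * Validates a received frame: checks for ring level CRC and buffer overrun
 * errors, sanity checks the frame size and, when the frame belongs to a
 * packet already being reassembled, verifies that the count, index and ID
 * fields are consistent with the first fragment. Updates the Rx error
 * counters and returns false if the frame must be dropped.
 */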
731 | static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf, |
732 | const struct thunderbolt_ip_frame_header *hdr) |
733 | { |
734 | u32 frame_id, frame_count, frame_size, frame_index; |
735 | unsigned int size; |
736 | |
737 | if (tf->frame.flags & RING_DESC_CRC_ERROR) { |
738 | net->stats.rx_crc_errors++; |
739 | return false; |
740 | } else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) { |
741 | net->stats.rx_over_errors++; |
742 | return false; |
743 | } |
744 | |
745 | /* Should be greater than just header i.e. contains data */ |
746 | size = tbnet_frame_size(tf); |
747 | if (size <= sizeof(*hdr)) { |
748 | net->stats.rx_length_errors++; |
749 | return false; |
750 | } |
751 | |
752 | frame_count = le32_to_cpu(hdr->frame_count); |
753 | frame_size = le32_to_cpu(hdr->frame_size); |
754 | frame_index = le16_to_cpu(hdr->frame_index); |
755 | frame_id = le16_to_cpu(hdr->frame_id); |
756 | |
757 | if ((frame_size > size - sizeof(*hdr)) || !frame_size) { |
758 | net->stats.rx_length_errors++; |
759 | return false; |
760 | } |
761 | |
762 | /* In case we're in the middle of packet, validate the frame |
763 | * header based on first fragment of the packet. |
764 | */ |
765 | if (net->skb && net->rx_hdr.frame_count) { |
766 | /* Check the frame count fits the count field */ |
767 | if (frame_count != le32_to_cpu(net->rx_hdr.frame_count)) { |
768 | net->stats.rx_length_errors++; |
769 | return false; |
770 | } |
771 | |
772 | /* Check the frame identifiers are incremented correctly, |
773 | * and id is matching. |
774 | */ |
775 | if (frame_index != le16_to_cpu(net->rx_hdr.frame_index) + 1 || |
776 | frame_id != le16_to_cpu(net->rx_hdr.frame_id)) { |
777 | net->stats.rx_missed_errors++; |
778 | return false; |
779 | } |
780 | |
781 | if (net->skb->len + frame_size > TBNET_MAX_MTU) { |
782 | net->stats.rx_length_errors++; |
783 | return false; |
784 | } |
785 | |
786 | return true; |
787 | } |
788 | |
789 | /* Start of packet, validate the frame header */ |
790 | if (frame_count == 0 || frame_count > TBNET_RING_SIZE / 4) { |
791 | net->stats.rx_length_errors++; |
792 | return false; |
793 | } |
794 | if (frame_index != 0) { |
795 | net->stats.rx_missed_errors++; |
796 | return false; |
797 | } |
798 | |
799 | return true; |
800 | } |
801 | |
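/*
 * NAPI poll routine. Pulls completed frames from the Rx DMA ring,
 * reassembles multi-frame packets into a single skb using page fragments
 * and hands finished packets to the stack through GRO. Consumed Rx buffers
 * are replenished in batches of at least MAX_SKB_FRAGS.
 */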
802 | static int tbnet_poll(struct napi_struct *napi, int budget) |
803 | { |
804 | struct tbnet *net = container_of(napi, struct tbnet, napi); |
	unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring);
	struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring);
807 | unsigned int rx_packets = 0; |
808 | |
809 | while (rx_packets < budget) { |
810 | const struct thunderbolt_ip_frame_header *hdr; |
811 | unsigned int hdr_size = sizeof(*hdr); |
812 | struct sk_buff *skb = NULL; |
813 | struct ring_frame *frame; |
814 | struct tbnet_frame *tf; |
815 | struct page *page; |
816 | bool last = true; |
817 | u32 frame_size; |
818 | |
819 | /* Return some buffers to hardware, one at a time is too |
820 | * slow so allocate MAX_SKB_FRAGS buffers at the same |
821 | * time. |
822 | */ |
823 | if (cleaned_count >= MAX_SKB_FRAGS) { |
			tbnet_alloc_rx_buffers(net, cleaned_count);
825 | cleaned_count = 0; |
826 | } |
827 | |
		frame = tb_ring_poll(net->rx_ring.ring);
829 | if (!frame) |
830 | break; |
831 | |
832 | dma_unmap_page(dma_dev, frame->buffer_phy, |
833 | TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE); |
834 | |
835 | tf = container_of(frame, typeof(*tf), frame); |
836 | |
837 | page = tf->page; |
838 | tf->page = NULL; |
839 | net->rx_ring.cons++; |
840 | cleaned_count++; |
841 | |
842 | hdr = page_address(page); |
843 | if (!tbnet_check_frame(net, tf, hdr)) { |
			trace_tbnet_invalid_rx_ip_frame(hdr->frame_size,
					hdr->frame_id, hdr->frame_index,
					hdr->frame_count);
			__free_pages(page, TBNET_RX_PAGE_ORDER);
			dev_kfree_skb_any(net->skb);
848 | net->skb = NULL; |
849 | continue; |
850 | } |
851 | |
		trace_tbnet_rx_ip_frame(hdr->frame_size, hdr->frame_id,
					hdr->frame_index, hdr->frame_count);
854 | frame_size = le32_to_cpu(hdr->frame_size); |
855 | |
856 | skb = net->skb; |
857 | if (!skb) { |
858 | skb = build_skb(page_address(page), |
859 | TBNET_RX_PAGE_SIZE); |
860 | if (!skb) { |
861 | __free_pages(page, TBNET_RX_PAGE_ORDER); |
862 | net->stats.rx_errors++; |
863 | break; |
864 | } |
865 | |
			skb_reserve(skb, hdr_size);
			skb_put(skb, frame_size);
868 | |
869 | net->skb = skb; |
870 | } else { |
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					page, hdr_size, frame_size,
					TBNET_RX_PAGE_SIZE - hdr_size);
874 | } |
875 | |
876 | net->rx_hdr.frame_size = hdr->frame_size; |
877 | net->rx_hdr.frame_count = hdr->frame_count; |
878 | net->rx_hdr.frame_index = hdr->frame_index; |
879 | net->rx_hdr.frame_id = hdr->frame_id; |
880 | last = le16_to_cpu(net->rx_hdr.frame_index) == |
881 | le32_to_cpu(net->rx_hdr.frame_count) - 1; |
882 | |
883 | rx_packets++; |
884 | net->stats.rx_bytes += frame_size; |
885 | |
886 | if (last) { |
			skb->protocol = eth_type_trans(skb, net->dev);
			trace_tbnet_rx_skb(skb);
			napi_gro_receive(&net->napi, skb);
890 | net->skb = NULL; |
891 | } |
892 | } |
893 | |
894 | net->stats.rx_packets += rx_packets; |
895 | |
896 | if (cleaned_count) |
		tbnet_alloc_rx_buffers(net, cleaned_count);
898 | |
899 | if (rx_packets >= budget) |
900 | return budget; |
901 | |
	napi_complete_done(napi, rx_packets);
	/* Re-enable the ring interrupt */
	tb_ring_poll_complete(net->rx_ring.ring);
905 | |
906 | return rx_packets; |
907 | } |
908 | |
909 | static void tbnet_start_poll(void *data) |
910 | { |
911 | struct tbnet *net = data; |
912 | |
	napi_schedule(&net->napi);
914 | } |
915 | |
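/*
 * ndo_open: allocates the frame mode Tx and Rx DMA rings and a local
 * transmit HopID, enables NAPI and starts the ThunderboltIP login
 * handshake. Full end-to-end flow control is used only when both ends
 * advertise support for it.
 */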
916 | static int tbnet_open(struct net_device *dev) |
917 | { |
918 | struct tbnet *net = netdev_priv(dev); |
919 | struct tb_xdomain *xd = net->xd; |
920 | u16 sof_mask, eof_mask; |
921 | struct tb_ring *ring; |
922 | unsigned int flags; |
923 | int hopid; |
924 | |
925 | netif_carrier_off(dev); |
926 | |
	ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
				RING_FLAG_FRAME);
929 | if (!ring) { |
		netdev_err(dev, "failed to allocate Tx ring\n");
931 | return -ENOMEM; |
932 | } |
933 | net->tx_ring.ring = ring; |
934 | |
	hopid = tb_xdomain_alloc_out_hopid(xd, -1);
	if (hopid < 0) {
		netdev_err(dev, "failed to allocate Tx HopID\n");
		tb_ring_free(net->tx_ring.ring);
939 | net->tx_ring.ring = NULL; |
940 | return hopid; |
941 | } |
942 | net->local_transmit_path = hopid; |
943 | |
944 | sof_mask = BIT(TBIP_PDF_FRAME_START); |
945 | eof_mask = BIT(TBIP_PDF_FRAME_END); |
946 | |
947 | flags = RING_FLAG_FRAME; |
948 | /* Only enable full E2E if the other end supports it too */ |
949 | if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E) |
950 | flags |= RING_FLAG_E2E; |
951 | |
	ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags,
				net->tx_ring.ring->hop, sof_mask,
				eof_mask, tbnet_start_poll, net);
	if (!ring) {
		netdev_err(dev, "failed to allocate Rx ring\n");
		tb_xdomain_release_out_hopid(xd, hopid);
		tb_ring_free(net->tx_ring.ring);
959 | net->tx_ring.ring = NULL; |
960 | return -ENOMEM; |
961 | } |
962 | net->rx_ring.ring = ring; |
963 | |
	napi_enable(&net->napi);
965 | start_login(net); |
966 | |
967 | return 0; |
968 | } |
969 | |
970 | static int tbnet_stop(struct net_device *dev) |
971 | { |
972 | struct tbnet *net = netdev_priv(dev); |
973 | |
	napi_disable(&net->napi);

	cancel_work_sync(&net->disconnect_work);
	tbnet_tear_down(net, true);

	tb_ring_free(net->rx_ring.ring);
	net->rx_ring.ring = NULL;

	tb_xdomain_release_out_hopid(net->xd, net->local_transmit_path);
	tb_ring_free(net->tx_ring.ring);
984 | net->tx_ring.ring = NULL; |
985 | |
986 | return 0; |
987 | } |
988 | |
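/*
 * Computes the IP/TCP/UDP checksums for a CHECKSUM_PARTIAL packet that has
 * already been copied into the Tx frames, stores the final frame count in
 * every frame header and syncs all frames for device DMA. Returns false
 * for protocols the driver cannot checksum.
 */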
989 | static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb, |
990 | struct tbnet_frame **frames, u32 frame_count) |
991 | { |
992 | struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page); |
	struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
994 | unsigned int i, len, offset = skb_transport_offset(skb); |
995 | /* Remove payload length from checksum */ |
996 | u32 paylen = skb->len - skb_transport_offset(skb); |
997 | __wsum wsum = (__force __wsum)htonl(paylen); |
998 | __be16 protocol = skb->protocol; |
999 | void *data = skb->data; |
1000 | void *dest = hdr + 1; |
1001 | __sum16 *tucso; |
1002 | |
1003 | if (skb->ip_summed != CHECKSUM_PARTIAL) { |
1004 | /* No need to calculate checksum so we just update the |
1005 | * total frame count and sync the frames for DMA. |
1006 | */ |
1007 | for (i = 0; i < frame_count; i++) { |
1008 | hdr = page_address(frames[i]->page); |
1009 | hdr->frame_count = cpu_to_le32(frame_count); |
			trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id,
						hdr->frame_index,
						hdr->frame_count);
			dma_sync_single_for_device(dma_dev,
				frames[i]->frame.buffer_phy,
				tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
1015 | } |
1016 | |
1017 | return true; |
1018 | } |
1019 | |
1020 | if (protocol == htons(ETH_P_8021Q)) { |
1021 | struct vlan_hdr *vhdr, vh; |
1022 | |
		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(vh), &vh);
1024 | if (!vhdr) |
1025 | return false; |
1026 | |
1027 | protocol = vhdr->h_vlan_encapsulated_proto; |
1028 | } |
1029 | |
	/* Data points to the beginning of the packet.
	 * Find the absolute offset of the checksum field within the packet:
	 * ipcso will update the IP checksum.
	 * tucso will update the TCP/UDP checksum.
	 */
1035 | if (protocol == htons(ETH_P_IP)) { |
	if (protocol == htons(ETH_P_IP)) {
		__sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data);

		*ipcso = 0;
		*ipcso = ip_fast_csum(dest + skb_network_offset(skb),
				      ip_hdr(skb)->ihl);

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tucso = dest + ((void *)&(udp_hdr(skb)->check) - data);
		else
			return false;

		*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					    ip_hdr(skb)->daddr, 0,
					    ip_hdr(skb)->protocol, 0);
	} else if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
		tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  IPPROTO_TCP, 0);
	} else if (protocol == htons(ETH_P_IPV6)) {
		tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  ipv6_hdr(skb)->nexthdr, 0);
1062 | } else { |
1063 | return false; |
1064 | } |
1065 | |
1066 | /* First frame was headers, rest of the frames contain data. |
1067 | * Calculate checksum over each frame. |
1068 | */ |
1069 | for (i = 0; i < frame_count; i++) { |
1070 | hdr = page_address(frames[i]->page); |
1071 | dest = (void *)(hdr + 1) + offset; |
1072 | len = le32_to_cpu(hdr->frame_size) - offset; |
		wsum = csum_partial(dest, len, wsum);
		hdr->frame_count = cpu_to_le32(frame_count);
		trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id,
					hdr->frame_index, hdr->frame_count);
1077 | |
1078 | offset = 0; |
1079 | } |
1080 | |
	*tucso = csum_fold(wsum);
1082 | |
1083 | /* Checksum is finally calculated and we don't touch the memory |
1084 | * anymore, so DMA sync the frames now. |
1085 | */ |
1086 | for (i = 0; i < frame_count; i++) { |
		dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
					   tbnet_frame_size(frames[i]),
					   DMA_TO_DEVICE);
1089 | } |
1090 | |
1091 | return true; |
1092 | } |
1093 | |
1094 | static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num, |
1095 | unsigned int *len) |
1096 | { |
1097 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num]; |
1098 | |
1099 | *len = skb_frag_size(frag); |
	return kmap_local_page(skb_frag_page(frag)) + skb_frag_off(frag);
1101 | } |
1102 | |
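/*
 * ndo_start_xmit: splits the skb (which may be a large TSO-sized packet)
 * into TBNET_MAX_PAYLOAD_SIZE sized Thunderbolt frames, copying data from
 * the linear area and the page fragments into pre-mapped Tx buffers, then
 * finalizes the checksums and queues the frames to the Tx DMA ring.
 */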
1103 | static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb, |
1104 | struct net_device *dev) |
1105 | { |
1106 | struct tbnet *net = netdev_priv(dev); |
1107 | struct tbnet_frame *frames[MAX_SKB_FRAGS]; |
	u16 frame_id = atomic_read(&net->frame_id);
1109 | struct thunderbolt_ip_frame_header *hdr; |
1110 | unsigned int len = skb_headlen(skb); |
1111 | unsigned int data_len = skb->len; |
1112 | unsigned int nframes, i; |
1113 | unsigned int frag = 0; |
1114 | void *src = skb->data; |
1115 | u32 frame_index = 0; |
1116 | bool unmap = false; |
1117 | void *dest; |
1118 | |
1119 | trace_tbnet_tx_skb(skb); |
1120 | |
1121 | nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE); |
	if (tbnet_available_buffers(&net->tx_ring) < nframes) {
		netif_stop_queue(net->dev);
1124 | return NETDEV_TX_BUSY; |
1125 | } |
1126 | |
1127 | frames[frame_index] = tbnet_get_tx_buffer(net); |
1128 | if (!frames[frame_index]) |
1129 | goto err_drop; |
1130 | |
1131 | hdr = page_address(frames[frame_index]->page); |
1132 | dest = hdr + 1; |
1133 | |
1134 | /* If overall packet is bigger than the frame data size */ |
1135 | while (data_len > TBNET_MAX_PAYLOAD_SIZE) { |
1136 | unsigned int size_left = TBNET_MAX_PAYLOAD_SIZE; |
1137 | |
1138 | hdr->frame_size = cpu_to_le32(TBNET_MAX_PAYLOAD_SIZE); |
1139 | hdr->frame_index = cpu_to_le16(frame_index); |
1140 | hdr->frame_id = cpu_to_le16(frame_id); |
1141 | |
1142 | do { |
1143 | if (len > size_left) { |
1144 | /* Copy data onto Tx buffer data with |
1145 | * full frame size then break and go to |
1146 | * next frame |
1147 | */ |
1148 | memcpy(dest, src, size_left); |
1149 | len -= size_left; |
1150 | dest += size_left; |
1151 | src += size_left; |
1152 | break; |
1153 | } |
1154 | |
1155 | memcpy(dest, src, len); |
1156 | size_left -= len; |
1157 | dest += len; |
1158 | |
1159 | if (unmap) { |
1160 | kunmap_local(src); |
1161 | unmap = false; |
1162 | } |
1163 | |
1164 | /* Ensure all fragments have been processed */ |
1165 | if (frag < skb_shinfo(skb)->nr_frags) { |
1166 | /* Map and then unmap quickly */ |
				src = tbnet_kmap_frag(skb, frag++, &len);
1168 | unmap = true; |
1169 | } else if (unlikely(size_left > 0)) { |
1170 | goto err_drop; |
1171 | } |
1172 | } while (size_left > 0); |
1173 | |
1174 | data_len -= TBNET_MAX_PAYLOAD_SIZE; |
1175 | frame_index++; |
1176 | |
1177 | frames[frame_index] = tbnet_get_tx_buffer(net); |
1178 | if (!frames[frame_index]) |
1179 | goto err_drop; |
1180 | |
1181 | hdr = page_address(frames[frame_index]->page); |
1182 | dest = hdr + 1; |
1183 | } |
1184 | |
1185 | hdr->frame_size = cpu_to_le32(data_len); |
1186 | hdr->frame_index = cpu_to_le16(frame_index); |
1187 | hdr->frame_id = cpu_to_le16(frame_id); |
1188 | |
1189 | frames[frame_index]->frame.size = data_len + sizeof(*hdr); |
1190 | |
1191 | /* In case the remaining data_len is smaller than a frame */ |
1192 | while (len < data_len) { |
1193 | memcpy(dest, src, len); |
1194 | data_len -= len; |
1195 | dest += len; |
1196 | |
1197 | if (unmap) { |
1198 | kunmap_local(src); |
1199 | unmap = false; |
1200 | } |
1201 | |
1202 | if (frag < skb_shinfo(skb)->nr_frags) { |
			src = tbnet_kmap_frag(skb, frag++, &len);
1204 | unmap = true; |
1205 | } else if (unlikely(data_len > 0)) { |
1206 | goto err_drop; |
1207 | } |
1208 | } |
1209 | |
1210 | memcpy(dest, src, data_len); |
1211 | |
1212 | if (unmap) |
1213 | kunmap_local(src); |
1214 | |
	if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
1216 | goto err_drop; |
1217 | |
1218 | for (i = 0; i < frame_index + 1; i++) |
		tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);
1220 | |
1221 | if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID) |
		atomic_inc(&net->frame_id);
1223 | |
1224 | net->stats.tx_packets++; |
1225 | net->stats.tx_bytes += skb->len; |
1226 | |
1227 | trace_tbnet_consume_skb(skb); |
1228 | dev_consume_skb_any(skb); |
1229 | |
1230 | return NETDEV_TX_OK; |
1231 | |
1232 | err_drop: |
1233 | /* We can re-use the buffers */ |
1234 | net->tx_ring.cons -= frame_index; |
1235 | |
1236 | dev_kfree_skb_any(skb); |
1237 | net->stats.tx_errors++; |
1238 | |
1239 | return NETDEV_TX_OK; |
1240 | } |
1241 | |
1242 | static void tbnet_get_stats64(struct net_device *dev, |
1243 | struct rtnl_link_stats64 *stats) |
1244 | { |
1245 | struct tbnet *net = netdev_priv(dev); |
1246 | |
1247 | stats->tx_packets = net->stats.tx_packets; |
1248 | stats->rx_packets = net->stats.rx_packets; |
1249 | stats->tx_bytes = net->stats.tx_bytes; |
1250 | stats->rx_bytes = net->stats.rx_bytes; |
1251 | stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors + |
1252 | net->stats.rx_over_errors + net->stats.rx_crc_errors + |
1253 | net->stats.rx_missed_errors; |
1254 | stats->tx_errors = net->stats.tx_errors; |
1255 | stats->rx_length_errors = net->stats.rx_length_errors; |
1256 | stats->rx_over_errors = net->stats.rx_over_errors; |
1257 | stats->rx_crc_errors = net->stats.rx_crc_errors; |
1258 | stats->rx_missed_errors = net->stats.rx_missed_errors; |
1259 | } |
1260 | |
1261 | static const struct net_device_ops tbnet_netdev_ops = { |
1262 | .ndo_open = tbnet_open, |
1263 | .ndo_stop = tbnet_stop, |
1264 | .ndo_start_xmit = tbnet_start_xmit, |
1265 | .ndo_get_stats64 = tbnet_get_stats64, |
1266 | }; |
1267 | |
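/*
 * Derives a locally administered unicast MAC address from the physical
 * port number and a Jenkins hash of the local XDomain UUID, so the same
 * host and port always end up with the same address.
 */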
1268 | static void tbnet_generate_mac(struct net_device *dev) |
1269 | { |
1270 | const struct tbnet *net = netdev_priv(dev); |
1271 | const struct tb_xdomain *xd = net->xd; |
1272 | u8 addr[ETH_ALEN]; |
1273 | u8 phy_port; |
1274 | u32 hash; |
1275 | |
1276 | phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route)); |
1277 | |
1278 | /* Unicast and locally administered MAC */ |
1279 | addr[0] = phy_port << 4 | 0x02; |
	hash = jhash2((u32 *)xd->local_uuid, 4, 0);
	memcpy(addr + 1, &hash, sizeof(hash));
	hash = jhash2((u32 *)xd->local_uuid, 4, hash);
1283 | addr[5] = hash & 0xff; |
1284 | eth_hw_addr_set(dev, addr); |
1285 | } |
1286 | |
1287 | static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id) |
1288 | { |
1289 | struct tb_xdomain *xd = tb_service_parent(svc); |
1290 | struct net_device *dev; |
1291 | struct tbnet *net; |
1292 | int ret; |
1293 | |
1294 | dev = alloc_etherdev(sizeof(*net)); |
1295 | if (!dev) |
1296 | return -ENOMEM; |
1297 | |
1298 | SET_NETDEV_DEV(dev, &svc->dev); |
1299 | |
1300 | net = netdev_priv(dev); |
1301 | INIT_DELAYED_WORK(&net->login_work, tbnet_login_work); |
1302 | INIT_WORK(&net->connected_work, tbnet_connected_work); |
1303 | INIT_WORK(&net->disconnect_work, tbnet_disconnect_work); |
1304 | mutex_init(&net->connection_lock); |
	atomic_set(&net->command_id, 0);
	atomic_set(&net->frame_id, 0);
1307 | net->svc = svc; |
1308 | net->dev = dev; |
1309 | net->xd = xd; |
1310 | |
1311 | tbnet_generate_mac(dev); |
1312 | |
	strcpy(dev->name, "thunderbolt%d");
1314 | dev->netdev_ops = &tbnet_netdev_ops; |
1315 | |
1316 | /* ThunderboltIP takes advantage of TSO packets but instead of |
1317 | * segmenting them we just split the packet into Thunderbolt |
1318 | * frames (maximum payload size of each frame is 4084 bytes) and |
1319 | * calculate checksum over the whole packet here. |
1320 | * |
1321 | * The receiving side does the opposite if the host OS supports |
1322 | * LRO, otherwise it needs to split the large packet into MTU |
1323 | * sized smaller packets. |
1324 | * |
1325 | * In order to receive large packets from the networking stack, |
1326 | * we need to announce support for most of the offloading |
1327 | * features here. |
1328 | */ |
1329 | dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_GRO | |
1330 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; |
1331 | dev->features = dev->hw_features | NETIF_F_HIGHDMA; |
1332 | dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header); |
1333 | |
	netif_napi_add(dev, &net->napi, tbnet_poll);
1335 | |
1336 | /* MTU range: 68 - 65522 */ |
1337 | dev->min_mtu = ETH_MIN_MTU; |
1338 | dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN; |
1339 | |
1340 | net->handler.uuid = &tbnet_svc_uuid; |
1341 | net->handler.callback = tbnet_handle_packet; |
1342 | net->handler.data = net; |
	tb_register_protocol_handler(&net->handler);

	tb_service_set_drvdata(svc, net);
1346 | |
1347 | ret = register_netdev(dev); |
1348 | if (ret) { |
		tb_unregister_protocol_handler(&net->handler);
1350 | free_netdev(dev); |
1351 | return ret; |
1352 | } |
1353 | |
1354 | return 0; |
1355 | } |
1356 | |
1357 | static void tbnet_remove(struct tb_service *svc) |
1358 | { |
1359 | struct tbnet *net = tb_service_get_drvdata(svc); |
1360 | |
	unregister_netdev(net->dev);
	tb_unregister_protocol_handler(&net->handler);
	free_netdev(net->dev);
1364 | } |
1365 | |
1366 | static void tbnet_shutdown(struct tb_service *svc) |
1367 | { |
	tbnet_tear_down(tb_service_get_drvdata(svc), true);
1369 | } |
1370 | |
1371 | static int tbnet_suspend(struct device *dev) |
1372 | { |
1373 | struct tb_service *svc = tb_to_service(dev); |
1374 | struct tbnet *net = tb_service_get_drvdata(svc); |
1375 | |
1376 | stop_login(net); |
	if (netif_running(net->dev)) {
		netif_device_detach(net->dev);
		tbnet_tear_down(net, true);
	}

	tb_unregister_protocol_handler(&net->handler);
1383 | return 0; |
1384 | } |
1385 | |
1386 | static int tbnet_resume(struct device *dev) |
1387 | { |
1388 | struct tb_service *svc = tb_to_service(dev); |
1389 | struct tbnet *net = tb_service_get_drvdata(svc); |
1390 | |
	tb_register_protocol_handler(&net->handler);

	netif_carrier_off(net->dev);
	if (netif_running(net->dev)) {
		netif_device_attach(net->dev);
		start_login(net);
1397 | } |
1398 | |
1399 | return 0; |
1400 | } |
1401 | |
1402 | static DEFINE_SIMPLE_DEV_PM_OPS(tbnet_pm_ops, tbnet_suspend, tbnet_resume); |
1403 | |
1404 | static const struct tb_service_id tbnet_ids[] = { |
	{ TB_SERVICE("network", 1) },
1406 | { }, |
1407 | }; |
1408 | MODULE_DEVICE_TABLE(tbsvc, tbnet_ids); |
1409 | |
1410 | static struct tb_service_driver tbnet_driver = { |
1411 | .driver = { |
1412 | .owner = THIS_MODULE, |
		.name = "thunderbolt-net",
1414 | .pm = pm_sleep_ptr(&tbnet_pm_ops), |
1415 | }, |
1416 | .probe = tbnet_probe, |
1417 | .remove = tbnet_remove, |
1418 | .shutdown = tbnet_shutdown, |
1419 | .id_table = tbnet_ids, |
1420 | }; |
1421 | |
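/*
 * Module init: publishes the "network" XDomain property directory
 * (advertising fragment ID matching, 64k frames and, optionally, full
 * end-to-end flow control) and registers the Thunderbolt service driver.
 */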
1422 | static int __init tbnet_init(void) |
1423 | { |
1424 | unsigned int flags; |
1425 | int ret; |
1426 | |
	tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
1428 | if (!tbnet_dir) |
1429 | return -ENOMEM; |
1430 | |
	tb_property_add_immediate(tbnet_dir, "prtcid", 1);
	tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
	tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
1434 | |
1435 | flags = TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES; |
1436 | if (tbnet_e2e) |
1437 | flags |= TBNET_E2E; |
	tb_property_add_immediate(tbnet_dir, "prtcstns", flags);

	ret = tb_register_property_dir("network", tbnet_dir);
1441 | if (ret) |
1442 | goto err_free_dir; |
1443 | |
	ret = tb_register_service_driver(&tbnet_driver);
1445 | if (ret) |
1446 | goto err_unregister; |
1447 | |
1448 | return 0; |
1449 | |
1450 | err_unregister: |
	tb_unregister_property_dir("network", tbnet_dir);
err_free_dir:
	tb_property_free_dir(tbnet_dir);
1454 | |
1455 | return ret; |
1456 | } |
1457 | module_init(tbnet_init); |
1458 | |
1459 | static void __exit tbnet_exit(void) |
1460 | { |
	tb_unregister_service_driver(&tbnet_driver);
	tb_unregister_property_dir("network", tbnet_dir);
	tb_property_free_dir(tbnet_dir);
1464 | } |
1465 | module_exit(tbnet_exit); |
1466 | |
MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Thunderbolt/USB4 network driver");
MODULE_LICENSE("GPL v2");
1472 | |