// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen <hjanssen@microsoft.com>
 */
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/nls.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/ucs2_string.h>
#include <linux/string.h>

#include "hyperv_net.h"
#include "netvsc_trace.h"

static void rndis_set_multicast(struct work_struct *w);

#define RNDIS_EXT_LEN HV_HYP_PAGE_SIZE
struct rndis_request {
	struct list_head list_ent;
	struct completion wait_event;

	struct rndis_message response_msg;
	/*
	 * The buffer for extended info after the RNDIS response message. It's
	 * referenced based on the data offset in the RNDIS message. Its size
	 * is enough for current needs, and should be sufficient for the near
	 * future.
	 */
	u8 response_ext[RNDIS_EXT_LEN];

	/* Simplify allocation by having a netvsc packet inline */
	struct hv_netvsc_packet pkt;

	struct rndis_message request_msg;
	/*
	 * The buffer for the extended info after the RNDIS request message.
	 * It is referenced and sized in a similar way as response_ext.
	 */
	u8 request_ext[RNDIS_EXT_LEN];
};

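/* Default 40-byte RSS (Toeplitz) hash key, used when no key is supplied. */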
static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};

static struct rndis_device *get_rndis_device(void)
{
	struct rndis_device *device;

	device = kzalloc(sizeof(struct rndis_device), GFP_KERNEL);
	if (!device)
		return NULL;

	spin_lock_init(&device->request_lock);

	INIT_LIST_HEAD(&device->req_list);
	INIT_WORK(&device->mcast_work, rndis_set_multicast);

	device->state = RNDIS_DEV_UNINITIALIZED;

	return device;
}

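/*
 * Allocate a request tracker for an outgoing RNDIS control message: fill in
 * the message header, assign a unique request id, and queue the tracker on
 * the device's request list so the completion path can match the host's
 * response to it.
 */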
static struct rndis_request *get_rndis_request(struct rndis_device *dev,
					       u32 msg_type,
					       u32 msg_len)
{
	struct rndis_request *request;
	struct rndis_message *rndis_msg;
	struct rndis_set_request *set;
	unsigned long flags;

	request = kzalloc(sizeof(struct rndis_request), GFP_KERNEL);
	if (!request)
		return NULL;

	init_completion(&request->wait_event);

	rndis_msg = &request->request_msg;
	rndis_msg->ndis_msg_type = msg_type;
	rndis_msg->msg_len = msg_len;

	request->pkt.q_idx = 0;

	/*
	 * Set the request id. This field is always after the rndis header for
	 * request/response packet types, so we just use the SetRequest as a
	 * template.
	 */
	set = &rndis_msg->msg.set_req;
	set->req_id = atomic_inc_return(&dev->new_req_id);

	/* Add to the request list */
	spin_lock_irqsave(&dev->request_lock, flags);
	list_add_tail(&request->list_ent, &dev->req_list);
	spin_unlock_irqrestore(&dev->request_lock, flags);

	return request;
}

static void put_rndis_request(struct rndis_device *dev,
			      struct rndis_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->request_lock, flags);
	list_del(&req->list_ent);
	spin_unlock_irqrestore(&dev->request_lock, flags);

	kfree(req);
}

static void dump_rndis_message(struct net_device *netdev,
			       const struct rndis_message *rndis_msg,
			       const void *data)
{
	switch (rndis_msg->ndis_msg_type) {
	case RNDIS_MSG_PACKET:
		if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_packet)) {
			const struct rndis_packet *pkt = data + RNDIS_HEADER_SIZE;

			netdev_dbg(netdev, "RNDIS_MSG_PACKET (len %u, "
				   "data offset %u data len %u, # oob %u, "
				   "oob offset %u, oob len %u, pkt offset %u, "
				   "pkt len %u\n",
				   rndis_msg->msg_len,
				   pkt->data_offset,
				   pkt->data_len,
				   pkt->num_oob_data_elements,
				   pkt->oob_data_offset,
				   pkt->oob_data_len,
				   pkt->per_pkt_info_offset,
				   pkt->per_pkt_info_len);
		}
		break;

	case RNDIS_MSG_INIT_C:
		if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
		    sizeof(struct rndis_initialize_complete)) {
			const struct rndis_initialize_complete *init_complete =
				data + RNDIS_HEADER_SIZE;

			netdev_dbg(netdev, "RNDIS_MSG_INIT_C "
				   "(len %u, id 0x%x, status 0x%x, major %d, minor %d, "
				   "device flags %d, max xfer size 0x%x, max pkts %u, "
				   "pkt aligned %u)\n",
				   rndis_msg->msg_len,
				   init_complete->req_id,
				   init_complete->status,
				   init_complete->major_ver,
				   init_complete->minor_ver,
				   init_complete->dev_flags,
				   init_complete->max_xfer_size,
				   init_complete->max_pkt_per_msg,
				   init_complete->pkt_alignment_factor);
		}
		break;

	case RNDIS_MSG_QUERY_C:
		if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
		    sizeof(struct rndis_query_complete)) {
			const struct rndis_query_complete *query_complete =
				data + RNDIS_HEADER_SIZE;

			netdev_dbg(netdev, "RNDIS_MSG_QUERY_C "
				   "(len %u, id 0x%x, status 0x%x, buf len %u, "
				   "buf offset %u)\n",
				   rndis_msg->msg_len,
				   query_complete->req_id,
				   query_complete->status,
				   query_complete->info_buflen,
				   query_complete->info_buf_offset);
		}
		break;

	case RNDIS_MSG_SET_C:
		if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >= sizeof(struct rndis_set_complete)) {
			const struct rndis_set_complete *set_complete =
				data + RNDIS_HEADER_SIZE;

			netdev_dbg(netdev,
				   "RNDIS_MSG_SET_C (len %u, id 0x%x, status 0x%x)\n",
				   rndis_msg->msg_len,
				   set_complete->req_id,
				   set_complete->status);
		}
		break;

	case RNDIS_MSG_INDICATE:
		if (rndis_msg->msg_len - RNDIS_HEADER_SIZE >=
		    sizeof(struct rndis_indicate_status)) {
			const struct rndis_indicate_status *indicate_status =
				data + RNDIS_HEADER_SIZE;

			netdev_dbg(netdev, "RNDIS_MSG_INDICATE "
				   "(len %u, status 0x%x, buf len %u, buf offset %u)\n",
				   rndis_msg->msg_len,
				   indicate_status->status,
				   indicate_status->status_buflen,
				   indicate_status->status_buf_offset);
		}
		break;

	default:
		netdev_dbg(netdev, "0x%x (len %u)\n",
			   rndis_msg->ndis_msg_type,
			   rndis_msg->msg_len);
		break;
	}
}

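/*
 * Hand a control request to the netvsc layer for transmission on the primary
 * channel. The request message is described by one page buffer, or by two
 * when it straddles a Hyper-V page boundary.
 */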
static int rndis_filter_send_request(struct rndis_device *dev,
				     struct rndis_request *req)
{
	struct hv_netvsc_packet *packet;
	struct hv_page_buffer page_buf[2];
	struct hv_page_buffer *pb = page_buf;
	int ret;

	/* Setup the packet to send it */
	packet = &req->pkt;

	packet->total_data_buflen = req->request_msg.msg_len;
	packet->page_buf_cnt = 1;

	pb[0].pfn = virt_to_phys(&req->request_msg) >>
		    HV_HYP_PAGE_SHIFT;
	pb[0].len = req->request_msg.msg_len;
	pb[0].offset = offset_in_hvpage(&req->request_msg);

	/* Add one page_buf when request_msg crosses a page boundary */
	if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) {
		packet->page_buf_cnt++;
		pb[0].len = HV_HYP_PAGE_SIZE -
			    pb[0].offset;
		pb[1].pfn = virt_to_phys((void *)&req->request_msg
					 + pb[0].len) >> HV_HYP_PAGE_SHIFT;
		pb[1].offset = 0;
		pb[1].len = req->request_msg.msg_len -
			    pb[0].len;
	}

	trace_rndis_send(dev->ndev, 0, &req->request_msg);

	rcu_read_lock_bh();
	ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false);
	rcu_read_unlock_bh();

	return ret;
}

static void rndis_set_link_state(struct rndis_device *rdev,
				 struct rndis_request *request)
{
	u32 link_status;
	struct rndis_query_complete *query_complete;
	u32 msg_len = request->response_msg.msg_len;

	/* Ensure the packet is big enough to access its fields */
	if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete))
		return;

	query_complete = &request->response_msg.msg.query_complete;

	if (query_complete->status == RNDIS_STATUS_SUCCESS &&
	    query_complete->info_buflen >= sizeof(u32) &&
	    query_complete->info_buf_offset >= sizeof(*query_complete) &&
	    msg_len - RNDIS_HEADER_SIZE >= query_complete->info_buf_offset &&
	    msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
			>= query_complete->info_buflen) {
		memcpy(&link_status, (void *)((unsigned long)query_complete +
		       query_complete->info_buf_offset), sizeof(u32));
		rdev->link_state = link_status != 0;
	}
}

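/*
 * Handle a completion message from the host: match it against an outstanding
 * request by request id, copy the response (bounded by RNDIS_EXT_LEN) into
 * the request tracker and wake up the waiter.
 */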
static void rndis_filter_receive_response(struct net_device *ndev,
					  struct netvsc_device *nvdev,
					  struct rndis_message *resp,
					  void *data)
{
	u32 *req_id = &resp->msg.init_complete.req_id;
	struct rndis_device *dev = nvdev->extension;
	struct rndis_request *request = NULL;
	bool found = false;
	unsigned long flags;

	/* This should never happen; it means a control message
	 * response was received after the device was removed.
	 */
	if (dev->state == RNDIS_DEV_UNINITIALIZED) {
		netdev_err(ndev,
			   "got rndis message uninitialized\n");
		return;
	}

	/* Ensure the packet is big enough to read req_id. Req_id is the 1st
	 * field in any request/response message, so the payload should have at
	 * least sizeof(u32) bytes.
	 */
	if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(u32)) {
		netdev_err(ndev, "rndis msg_len too small: %u\n",
			   resp->msg_len);
		return;
	}

	/* Copy the request ID into nvchan->recv_buf */
	*req_id = *(u32 *)(data + RNDIS_HEADER_SIZE);

	spin_lock_irqsave(&dev->request_lock, flags);
	list_for_each_entry(request, &dev->req_list, list_ent) {
		/*
		 * All request/response messages contain RequestId as the 1st
		 * field.
		 */
		if (request->request_msg.msg.init_req.req_id == *req_id) {
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&dev->request_lock, flags);

	if (found) {
		if (resp->msg_len <=
		    sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
			memcpy(&request->response_msg, resp, RNDIS_HEADER_SIZE + sizeof(*req_id));
			unsafe_memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id),
				      data + RNDIS_HEADER_SIZE + sizeof(*req_id),
				      resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id),
				      "request->response_msg is followed by a padding of RNDIS_EXT_LEN inside rndis_request");
			if (request->request_msg.ndis_msg_type ==
			    RNDIS_MSG_QUERY && request->request_msg.msg.
			    query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
				rndis_set_link_state(dev, request);
		} else {
			netdev_err(ndev,
				   "rndis response buffer overflow "
				   "detected (size %u max %zu)\n",
				   resp->msg_len,
				   sizeof(struct rndis_message));

			if (resp->ndis_msg_type ==
			    RNDIS_MSG_RESET_C) {
				/* does not have a request id field */
				request->response_msg.msg.reset_complete.
					status = RNDIS_STATUS_BUFFER_OVERFLOW;
			} else {
				request->response_msg.msg.
					init_complete.status =
					RNDIS_STATUS_BUFFER_OVERFLOW;
			}
		}

		netvsc_dma_unmap(((struct net_device_context *)
				  netdev_priv(ndev))->device_ctx, &request->pkt);
		complete(&request->wait_event);
	} else {
		netdev_err(ndev,
			   "no rndis request found for this response "
			   "(id 0x%x res type 0x%x)\n",
			   *req_id,
			   resp->ndis_msg_type);
	}
}

/*
 * Get the Per-Packet-Info with the specified type;
 * return NULL if not found.
 */
static inline void *rndis_get_ppi(struct net_device *ndev,
				  struct rndis_packet *rpkt,
				  u32 rpkt_len, u32 type, u8 internal,
				  u32 ppi_size, void *data)
{
	struct rndis_per_packet_info *ppi;
	int len;

	if (rpkt->per_pkt_info_offset == 0)
		return NULL;

	/* Validate info_offset and info_len */
	if (rpkt->per_pkt_info_offset < sizeof(struct rndis_packet) ||
	    rpkt->per_pkt_info_offset > rpkt_len) {
		netdev_err(ndev, "Invalid per_pkt_info_offset: %u\n",
			   rpkt->per_pkt_info_offset);
		return NULL;
	}

	if (rpkt->per_pkt_info_len < sizeof(*ppi) ||
	    rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) {
		netdev_err(ndev, "Invalid per_pkt_info_len: %u\n",
			   rpkt->per_pkt_info_len);
		return NULL;
	}

	ppi = (struct rndis_per_packet_info *)((ulong)rpkt +
					       rpkt->per_pkt_info_offset);
	/* Copy the PPIs into nvchan->recv_buf */
	memcpy(ppi, data + RNDIS_HEADER_SIZE + rpkt->per_pkt_info_offset, rpkt->per_pkt_info_len);
	len = rpkt->per_pkt_info_len;

	while (len > 0) {
		/* Validate ppi_offset and ppi_size */
		if (ppi->size > len) {
			netdev_err(ndev, "Invalid ppi size: %u\n", ppi->size);
			continue;
		}

		if (ppi->ppi_offset >= ppi->size) {
			netdev_err(ndev, "Invalid ppi_offset: %u\n", ppi->ppi_offset);
			continue;
		}

		if (ppi->type == type && ppi->internal == internal) {
			/* ppi->size should be big enough to hold the returned object. */
			if (ppi->size - ppi->ppi_offset < ppi_size) {
				netdev_err(ndev, "Invalid ppi: size %u ppi_offset %u\n",
					   ppi->size, ppi->ppi_offset);
				continue;
			}
			return (void *)((ulong)ppi + ppi->ppi_offset);
		}
		len -= ppi->size;
		ppi = (struct rndis_per_packet_info *)((ulong)ppi + ppi->size);
	}

	return NULL;
}

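/*
 * Accumulate one fragment of a (possibly coalesced) receive into the
 * per-channel RSC state. The vlan/checksum/hash metadata is shared by all
 * fragments of a packet, so it is captured only from the first one.
 */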
static inline
void rsc_add_data(struct netvsc_channel *nvchan,
		  const struct ndis_pkt_8021q_info *vlan,
		  const struct ndis_tcp_ip_checksum_info *csum_info,
		  const u32 *hash_info,
		  void *data, u32 len)
{
	u32 cnt = nvchan->rsc.cnt;

	if (cnt) {
		nvchan->rsc.pktlen += len;
	} else {
		/* The data/values pointed to by vlan, csum_info and hash_info are
		 * shared across the different 'fragments' of the RSC packet; store
		 * them into the packet itself.
		 */
		if (vlan != NULL) {
			memcpy(&nvchan->rsc.vlan, vlan, sizeof(*vlan));
			nvchan->rsc.ppi_flags |= NVSC_RSC_VLAN;
		} else {
			nvchan->rsc.ppi_flags &= ~NVSC_RSC_VLAN;
		}
		if (csum_info != NULL) {
			memcpy(&nvchan->rsc.csum_info, csum_info, sizeof(*csum_info));
			nvchan->rsc.ppi_flags |= NVSC_RSC_CSUM_INFO;
		} else {
			nvchan->rsc.ppi_flags &= ~NVSC_RSC_CSUM_INFO;
		}
		nvchan->rsc.pktlen = len;
		if (hash_info != NULL) {
			nvchan->rsc.hash_info = *hash_info;
			nvchan->rsc.ppi_flags |= NVSC_RSC_HASH_INFO;
		} else {
			nvchan->rsc.ppi_flags &= ~NVSC_RSC_HASH_INFO;
		}
	}

	nvchan->rsc.data[cnt] = data;
	nvchan->rsc.len[cnt] = len;
	nvchan->rsc.cnt++;
}

static int rndis_filter_receive_data(struct net_device *ndev,
				     struct netvsc_device *nvdev,
				     struct netvsc_channel *nvchan,
				     struct rndis_message *msg,
				     void *data, u32 data_buflen)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	const struct ndis_tcp_ip_checksum_info *csum_info;
	const struct ndis_pkt_8021q_info *vlan;
	const struct rndis_pktinfo_id *pktinfo_id;
	const u32 *hash_info;
	u32 data_offset, rpkt_len;
	bool rsc_more = false;
	int ret;

	/* Ensure data_buflen is big enough to read header fields */
	if (data_buflen < RNDIS_HEADER_SIZE + sizeof(struct rndis_packet)) {
		netdev_err(ndev, "invalid rndis pkt, data_buflen too small: %u\n",
			   data_buflen);
		return NVSP_STAT_FAIL;
	}

	/* Copy the RNDIS packet into nvchan->recv_buf */
	memcpy(rndis_pkt, data + RNDIS_HEADER_SIZE, sizeof(*rndis_pkt));

	/* Validate rndis_pkt offset */
	if (rndis_pkt->data_offset >= data_buflen - RNDIS_HEADER_SIZE) {
		netdev_err(ndev, "invalid rndis packet offset: %u\n",
			   rndis_pkt->data_offset);
		return NVSP_STAT_FAIL;
	}

	/* Remove the rndis header and pass it back up the stack */
	data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;

	rpkt_len = data_buflen - RNDIS_HEADER_SIZE;
	data_buflen -= data_offset;

	/*
	 * Make sure we got a valid RNDIS message: now total_data_buflen
	 * should be the data packet size plus the trailer padding size.
	 */
	if (unlikely(data_buflen < rndis_pkt->data_len)) {
		netdev_err(ndev, "rndis message buffer "
			   "overflow detected (got %u, min %u)"
			   "...dropping this message!\n",
			   data_buflen, rndis_pkt->data_len);
		return NVSP_STAT_FAIL;
	}

	vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0, sizeof(*vlan),
			     data);

	csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0,
				  sizeof(*csum_info), data);

	hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0,
				  sizeof(*hash_info), data);

	pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1,
				   sizeof(*pktinfo_id), data);

	/* Identify RSC frags, drop erroneous packets */
	if (pktinfo_id && (pktinfo_id->flag & RNDIS_PKTINFO_SUBALLOC)) {
		if (pktinfo_id->flag & RNDIS_PKTINFO_1ST_FRAG)
			nvchan->rsc.cnt = 0;
		else if (nvchan->rsc.cnt == 0)
			goto drop;

		rsc_more = true;

		if (pktinfo_id->flag & RNDIS_PKTINFO_LAST_FRAG)
			rsc_more = false;

		if (rsc_more && nvchan->rsc.is_last)
			goto drop;
	} else {
		nvchan->rsc.cnt = 0;
	}

	if (unlikely(nvchan->rsc.cnt >= NVSP_RSC_MAX))
		goto drop;

	/* Put data into the per-channel structure.
	 * Also, remove the rndis trailer padding from the rndis packet message:
	 * rndis_pkt->data_len tells us the real data length, so we only copy
	 * the data packet to the stack, without the rndis trailer padding.
	 */
	rsc_add_data(nvchan, vlan, csum_info, hash_info,
		     data + data_offset, rndis_pkt->data_len);

	if (rsc_more)
		return NVSP_STAT_SUCCESS;

	ret = netvsc_recv_callback(ndev, nvdev, nvchan);
	nvchan->rsc.cnt = 0;

	return ret;

drop:
	return NVSP_STAT_FAIL;
}

int rndis_filter_receive(struct net_device *ndev,
			 struct netvsc_device *net_dev,
			 struct netvsc_channel *nvchan,
			 void *data, u32 buflen)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct rndis_message *rndis_msg = nvchan->recv_buf;

	if (buflen < RNDIS_HEADER_SIZE) {
		netdev_err(ndev, "Invalid rndis_msg (buflen: %u)\n", buflen);
		return NVSP_STAT_FAIL;
	}

	/* Copy the RNDIS msg header into nvchan->recv_buf */
	memcpy(rndis_msg, data, RNDIS_HEADER_SIZE);

	/* Validate incoming rndis_message packet */
	if (rndis_msg->msg_len < RNDIS_HEADER_SIZE ||
	    buflen < rndis_msg->msg_len) {
		netdev_err(ndev, "Invalid rndis_msg (buflen: %u, msg_len: %u)\n",
			   buflen, rndis_msg->msg_len);
		return NVSP_STAT_FAIL;
	}

	if (netif_msg_rx_status(net_device_ctx))
		dump_rndis_message(ndev, rndis_msg, data);

	switch (rndis_msg->ndis_msg_type) {
	case RNDIS_MSG_PACKET:
		return rndis_filter_receive_data(ndev, net_dev, nvchan,
						 rndis_msg, data, buflen);
	case RNDIS_MSG_INIT_C:
	case RNDIS_MSG_QUERY_C:
	case RNDIS_MSG_SET_C:
		/* completion msgs */
		rndis_filter_receive_response(ndev, net_dev, rndis_msg, data);
		break;

	case RNDIS_MSG_INDICATE:
		/* notification msgs */
		netvsc_linkstatus_callback(ndev, rndis_msg, data, buflen);
		break;
	default:
		netdev_err(ndev,
			   "unhandled rndis message (type %u len %u)\n",
			   rndis_msg->ndis_msg_type,
			   rndis_msg->msg_len);
		return NVSP_STAT_FAIL;
	}

	return NVSP_STAT_SUCCESS;
}

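/*
 * Send an RNDIS query for the given OID and block until the host completes
 * it, then copy the returned information buffer into 'result' (failing if it
 * would exceed the caller-supplied *result_size) and report its actual
 * length back through *result_size.
 */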
static int rndis_filter_query_device(struct rndis_device *dev,
				     struct netvsc_device *nvdev,
				     u32 oid, void *result, u32 *result_size)
{
	struct rndis_request *request;
	u32 inresult_size = *result_size;
	struct rndis_query_request *query;
	struct rndis_query_complete *query_complete;
	u32 msg_len;
	int ret = 0;

	if (!result)
		return -EINVAL;

	*result_size = 0;
	request = get_rndis_request(dev, RNDIS_MSG_QUERY,
			RNDIS_MESSAGE_SIZE(struct rndis_query_request));
	if (!request) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Setup the rndis query */
	query = &request->request_msg.msg.query_req;
	query->oid = oid;
	query->info_buf_offset = sizeof(struct rndis_query_request);
	query->info_buflen = 0;
	query->dev_vc_handle = 0;

	if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
		struct ndis_offload *hwcaps;
		u32 nvsp_version = nvdev->nvsp_version;
		u8 ndis_rev;
		size_t size;

		if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
			size = NDIS_OFFLOAD_SIZE;
		} else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
			size = NDIS_OFFLOAD_SIZE_6_1;
		} else {
			ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
			size = NDIS_OFFLOAD_SIZE_6_0;
		}

		request->request_msg.msg_len += size;
		query->info_buflen = size;
		hwcaps = (struct ndis_offload *)
			((unsigned long)query + query->info_buf_offset);

		hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
		hwcaps->header.revision = ndis_rev;
		hwcaps->header.size = size;

	} else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
		struct ndis_recv_scale_cap *cap;

		request->request_msg.msg_len +=
			sizeof(struct ndis_recv_scale_cap);
		query->info_buflen = sizeof(struct ndis_recv_scale_cap);
		cap = (struct ndis_recv_scale_cap *)((unsigned long)query +
						     query->info_buf_offset);
		cap->hdr.type = NDIS_OBJECT_TYPE_RSS_CAPABILITIES;
		cap->hdr.rev = NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2;
		cap->hdr.size = sizeof(struct ndis_recv_scale_cap);
	}

	ret = rndis_filter_send_request(dev, request);
	if (ret != 0)
		goto cleanup;

	wait_for_completion(&request->wait_event);

	/* Copy the response back */
	query_complete = &request->response_msg.msg.query_complete;
	msg_len = request->response_msg.msg_len;

	/* Ensure the packet is big enough to access its fields */
	if (msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_query_complete)) {
		ret = -1;
		goto cleanup;
	}

	if (query_complete->info_buflen > inresult_size ||
	    query_complete->info_buf_offset < sizeof(*query_complete) ||
	    msg_len - RNDIS_HEADER_SIZE < query_complete->info_buf_offset ||
	    msg_len - RNDIS_HEADER_SIZE - query_complete->info_buf_offset
			< query_complete->info_buflen) {
		ret = -1;
		goto cleanup;
	}

	memcpy(result,
	       (void *)((unsigned long)query_complete +
			query_complete->info_buf_offset),
	       query_complete->info_buflen);

	*result_size = query_complete->info_buflen;

cleanup:
	if (request)
		put_rndis_request(dev, request);

	return ret;
}

/* Get the hardware offload capabilities */
static int
rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device,
		   struct ndis_offload *caps)
{
	u32 caps_len = sizeof(*caps);
	int ret;

	memset(caps, 0, sizeof(*caps));

	ret = rndis_filter_query_device(dev, net_device,
					OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
					caps, &caps_len);
	if (ret)
		return ret;

	if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
		netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
			    caps->header.type);
		return -EINVAL;
	}

	if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
		netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
			    caps->header.revision);
		return -EINVAL;
	}

	if (caps->header.size > caps_len ||
	    caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
		netdev_warn(dev->ndev,
			    "invalid NDIS objsize %u, data size %u\n",
			    caps->header.size, caps_len);
		return -EINVAL;
	}

	return 0;
}

static int rndis_filter_query_device_mac(struct rndis_device *dev,
					 struct netvsc_device *net_device)
{
	u32 size = ETH_ALEN;

	return rndis_filter_query_device(dev, net_device,
					 RNDIS_OID_802_3_PERMANENT_ADDRESS,
					 dev->hw_mac_adr, &size);
}

#define NWADR_STR "NetworkAddress"
#define NWADR_STRLEN 14

int rndis_filter_set_device_mac(struct netvsc_device *nvdev,
				const char *mac)
{
	struct rndis_device *rdev = nvdev->extension;
	struct rndis_request *request;
	struct rndis_set_request *set;
	struct rndis_config_parameter_info *cpi;
	wchar_t *cfg_nwadr, *cfg_mac;
	struct rndis_set_complete *set_complete;
	char macstr[2*ETH_ALEN+1];
	u32 extlen = sizeof(struct rndis_config_parameter_info) +
		2*NWADR_STRLEN + 4*ETH_ALEN;
	int ret;

	request = get_rndis_request(rdev, RNDIS_MSG_SET,
		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
	if (!request)
		return -ENOMEM;

	set = &request->request_msg.msg.set_req;
	set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
	set->info_buflen = extlen;
	set->info_buf_offset = sizeof(struct rndis_set_request);
	set->dev_vc_handle = 0;

	cpi = (struct rndis_config_parameter_info *)((ulong)set +
		set->info_buf_offset);
	cpi->parameter_name_offset =
		sizeof(struct rndis_config_parameter_info);
	/* Multiply by 2 because host needs 2 bytes (utf16) for each char */
	cpi->parameter_name_length = 2*NWADR_STRLEN;
	cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
	cpi->parameter_value_offset =
		cpi->parameter_name_offset + cpi->parameter_name_length;
	/* Multiply by 4 because each MAC byte displayed as 2 utf16 chars */
	cpi->parameter_value_length = 4*ETH_ALEN;

	cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
	cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
	ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
			      cfg_nwadr, NWADR_STRLEN);
	if (ret < 0)
		goto cleanup;
	snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
	ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
			      cfg_mac, 2*ETH_ALEN);
	if (ret < 0)
		goto cleanup;

	ret = rndis_filter_send_request(rdev, request);
	if (ret != 0)
		goto cleanup;

	wait_for_completion(&request->wait_event);

	set_complete = &request->response_msg.msg.set_complete;
	if (set_complete->status != RNDIS_STATUS_SUCCESS)
		ret = -EIO;

cleanup:
	put_rndis_request(rdev, request);
	return ret;
}

int
rndis_filter_set_offload_params(struct net_device *ndev,
				struct netvsc_device *nvdev,
				struct ndis_offload_params *req_offloads)
{
	struct rndis_device *rdev = nvdev->extension;
	struct rndis_request *request;
	struct rndis_set_request *set;
	struct ndis_offload_params *offload_params;
	struct rndis_set_complete *set_complete;
	u32 extlen = sizeof(struct ndis_offload_params);
	int ret;
	u32 vsp_version = nvdev->nvsp_version;

	if (vsp_version <= NVSP_PROTOCOL_VERSION_4) {
		extlen = VERSION_4_OFFLOAD_SIZE;
		/* On NVSP_PROTOCOL_VERSION_4 and below, we do not support
		 * UDP checksum offload.
		 */
		req_offloads->udp_ip_v4_csum = 0;
		req_offloads->udp_ip_v6_csum = 0;
	}

	request = get_rndis_request(rdev, RNDIS_MSG_SET,
		RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
	if (!request)
		return -ENOMEM;

	set = &request->request_msg.msg.set_req;
	set->oid = OID_TCP_OFFLOAD_PARAMETERS;
	set->info_buflen = extlen;
	set->info_buf_offset = sizeof(struct rndis_set_request);
	set->dev_vc_handle = 0;

	offload_params = (struct ndis_offload_params *)((ulong)set +
				set->info_buf_offset);
	*offload_params = *req_offloads;
	offload_params->header.type = NDIS_OBJECT_TYPE_DEFAULT;
	offload_params->header.revision = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
	offload_params->header.size = extlen;

	ret = rndis_filter_send_request(rdev, request);
	if (ret != 0)
		goto cleanup;

	wait_for_completion(&request->wait_event);
	set_complete = &request->response_msg.msg.set_complete;
	if (set_complete->status != RNDIS_STATUS_SUCCESS) {
		netdev_err(ndev, "Fail to set offload on host side:0x%x\n",
			   set_complete->status);
		ret = -EINVAL;
	}

cleanup:
	put_rndis_request(rdev, request);
	return ret;
}

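/*
 * Build and send the OID_GEN_RECEIVE_SCALE_PARAMETERS set request. Its
 * information buffer is the ndis_recv_scale_param header, followed by the
 * indirection table (4 bytes per entry) and the 40-byte hash key.
 */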
static int rndis_set_rss_param_msg(struct rndis_device *rdev,
				   const u8 *rss_key, u16 flag)
{
	struct net_device *ndev = rdev->ndev;
	struct net_device_context *ndc = netdev_priv(ndev);
	struct rndis_request *request;
	struct rndis_set_request *set;
	struct rndis_set_complete *set_complete;
	u32 extlen = sizeof(struct ndis_recv_scale_param) +
		     4 * ndc->rx_table_sz + NETVSC_HASH_KEYLEN;
	struct ndis_recv_scale_param *rssp;
	u32 *itab;
	u8 *keyp;
	int i, ret;

	request = get_rndis_request(
			rdev, RNDIS_MSG_SET,
			RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
	if (!request)
		return -ENOMEM;

	set = &request->request_msg.msg.set_req;
	set->oid = OID_GEN_RECEIVE_SCALE_PARAMETERS;
	set->info_buflen = extlen;
	set->info_buf_offset = sizeof(struct rndis_set_request);
	set->dev_vc_handle = 0;

	rssp = (struct ndis_recv_scale_param *)(set + 1);
	rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
	rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
	rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
	rssp->flag = flag;
	rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
			 NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
			 NDIS_HASH_TCP_IPV6;
	rssp->indirect_tabsize = 4 * ndc->rx_table_sz;
	rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
	rssp->hashkey_size = NETVSC_HASH_KEYLEN;
	rssp->hashkey_offset = rssp->indirect_taboffset +
			       rssp->indirect_tabsize;

	/* Set indirection table entries */
	itab = (u32 *)(rssp + 1);
	for (i = 0; i < ndc->rx_table_sz; i++)
		itab[i] = ndc->rx_table[i];

	/* Set hash key values */
	keyp = (u8 *)((unsigned long)rssp + rssp->hashkey_offset);
	memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);

	ret = rndis_filter_send_request(rdev, request);
	if (ret != 0)
		goto cleanup;

	wait_for_completion(&request->wait_event);
	set_complete = &request->response_msg.msg.set_complete;
	if (set_complete->status == RNDIS_STATUS_SUCCESS) {
		if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
		    !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
			memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);

	} else {
		netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
			   set_complete->status);
		ret = -EINVAL;
	}

cleanup:
	put_rndis_request(rdev, request);
	return ret;
}

int rndis_filter_set_rss_param(struct rndis_device *rdev,
			       const u8 *rss_key)
{
	/* Disable RSS before change */
	rndis_set_rss_param_msg(rdev, rss_key,
				NDIS_RSS_PARAM_FLAG_DISABLE_RSS);

	return rndis_set_rss_param_msg(rdev, rss_key, 0);
}

static int rndis_filter_query_device_link_status(struct rndis_device *dev,
						 struct netvsc_device *net_device)
{
	u32 size = sizeof(u32);
	u32 link_status;

	return rndis_filter_query_device(dev, net_device,
					 RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
					 &link_status, &size);
}

static int rndis_filter_query_link_speed(struct rndis_device *dev,
					 struct netvsc_device *net_device)
{
	u32 size = sizeof(u32);
	u32 link_speed;
	struct net_device_context *ndc;
	int ret;

	ret = rndis_filter_query_device(dev, net_device,
					RNDIS_OID_GEN_LINK_SPEED,
					&link_speed, &size);

	if (!ret) {
		ndc = netdev_priv(dev->ndev);

		/* The link speed reported from the host is in 100bps units, so
		 * we convert it to Mbps here.
		 */
		ndc->speed = link_speed / 10000;
	}

	return ret;
}

static int rndis_filter_set_packet_filter(struct rndis_device *dev,
					  u32 new_filter)
{
	struct rndis_request *request;
	struct rndis_set_request *set;
	int ret;

	if (dev->filter == new_filter)
		return 0;

	request = get_rndis_request(dev, RNDIS_MSG_SET,
			RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
			sizeof(u32));
	if (!request)
		return -ENOMEM;

	/* Setup the rndis set */
	set = &request->request_msg.msg.set_req;
	set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
	set->info_buflen = sizeof(u32);
	set->info_buf_offset = offsetof(typeof(*set), info_buf);
	memcpy(set->info_buf, &new_filter, sizeof(u32));

	ret = rndis_filter_send_request(dev, request);
	if (ret == 0) {
		wait_for_completion(&request->wait_event);
		dev->filter = new_filter;
	}

	put_rndis_request(dev, request);

	return ret;
}

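/*
 * Work item that translates the current netdev flags and multicast list into
 * an NDIS packet filter and programs it into the host.
 */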
static void rndis_set_multicast(struct work_struct *w)
{
	struct rndis_device *rdev
		= container_of(w, struct rndis_device, mcast_work);
	u32 filter = NDIS_PACKET_TYPE_DIRECTED;
	unsigned int flags = rdev->ndev->flags;

	if (flags & IFF_PROMISC) {
		filter = NDIS_PACKET_TYPE_PROMISCUOUS;
	} else {
		if (!netdev_mc_empty(rdev->ndev) || (flags & IFF_ALLMULTI))
			filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
		if (flags & IFF_BROADCAST)
			filter |= NDIS_PACKET_TYPE_BROADCAST;
	}

	rndis_filter_set_packet_filter(rdev, filter);
}

void rndis_filter_update(struct netvsc_device *nvdev)
{
	struct rndis_device *rdev = nvdev->extension;

	schedule_work(&rdev->mcast_work);
}

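/*
 * Perform the RNDIS INITIALIZE handshake: advertise the RNDIS version we
 * speak and, on success, record the host's max packets per message and
 * packet alignment in the netvsc device.
 */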
static int rndis_filter_init_device(struct rndis_device *dev,
				    struct netvsc_device *nvdev)
{
	struct rndis_request *request;
	struct rndis_initialize_request *init;
	struct rndis_initialize_complete *init_complete;
	u32 status;
	int ret;

	request = get_rndis_request(dev, RNDIS_MSG_INIT,
			RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
	if (!request) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Setup the rndis set */
	init = &request->request_msg.msg.init_req;
	init->major_ver = RNDIS_MAJOR_VERSION;
	init->minor_ver = RNDIS_MINOR_VERSION;
	init->max_xfer_size = 0x4000;

	dev->state = RNDIS_DEV_INITIALIZING;

	ret = rndis_filter_send_request(dev, request);
	if (ret != 0) {
		dev->state = RNDIS_DEV_UNINITIALIZED;
		goto cleanup;
	}

	wait_for_completion(&request->wait_event);

	init_complete = &request->response_msg.msg.init_complete;
	status = init_complete->status;
	if (status == RNDIS_STATUS_SUCCESS) {
		dev->state = RNDIS_DEV_INITIALIZED;
		nvdev->max_pkt = init_complete->max_pkt_per_msg;
		nvdev->pkt_align = 1 << init_complete->pkt_alignment_factor;
		ret = 0;
	} else {
		dev->state = RNDIS_DEV_UNINITIALIZED;
		ret = -EINVAL;
	}

cleanup:
	if (request)
		put_rndis_request(dev, request);

	return ret;
}

static bool netvsc_device_idle(const struct netvsc_device *nvdev)
{
	int i;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];

		if (nvchan->mrc.first != nvchan->mrc.next)
			return false;

		if (atomic_read(&nvchan->queue_sends) > 0)
			return false;
	}

	return true;
}

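/*
 * Send a best-effort RNDIS HALT to the host, then mark the device for
 * destruction and wait until every channel has drained its outstanding
 * sends and receive completions.
 */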
static void rndis_filter_halt_device(struct netvsc_device *nvdev,
				     struct rndis_device *dev)
{
	struct rndis_request *request;
	struct rndis_halt_request *halt;

	/* Attempt to do a rndis device halt */
	request = get_rndis_request(dev, RNDIS_MSG_HALT,
			RNDIS_MESSAGE_SIZE(struct rndis_halt_request));
	if (!request)
		goto cleanup;

	/* Setup the rndis set */
	halt = &request->request_msg.msg.halt_req;
	halt->req_id = atomic_inc_return(&dev->new_req_id);

	/* Ignore return since this msg is optional. */
	rndis_filter_send_request(dev, request);

	dev->state = RNDIS_DEV_UNINITIALIZED;

cleanup:
	nvdev->destroy = true;

	/* Force flag to be ordered before waiting */
	wmb();

	/* Wait for all send completions */
	wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));

	if (request)
		put_rndis_request(dev, request);
}

static int rndis_filter_open_device(struct rndis_device *dev)
{
	int ret;

	if (dev->state != RNDIS_DEV_INITIALIZED)
		return 0;

	ret = rndis_filter_set_packet_filter(dev,
					     NDIS_PACKET_TYPE_BROADCAST |
					     NDIS_PACKET_TYPE_ALL_MULTICAST |
					     NDIS_PACKET_TYPE_DIRECTED);
	if (ret == 0)
		dev->state = RNDIS_DEV_DATAINITIALIZED;

	return ret;
}

static int rndis_filter_close_device(struct rndis_device *dev)
{
	int ret;

	if (dev->state != RNDIS_DEV_DATAINITIALIZED)
		return 0;

	/* Make sure rndis_set_multicast doesn't re-enable filter! */
	cancel_work_sync(&dev->mcast_work);

	ret = rndis_filter_set_packet_filter(dev, 0);
	if (ret == -ENODEV)
		ret = 0;

	if (ret == 0)
		dev->state = RNDIS_DEV_INITIALIZED;

	return ret;
}

static void netvsc_sc_open(struct vmbus_channel *new_sc)
{
	struct net_device *ndev =
		hv_get_drvdata(new_sc->primary_channel->device_obj);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_device *nvscdev;
	u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
	struct netvsc_channel *nvchan;
	int ret;

	/* This is safe because this callback only happens when
	 * new device is being setup and waiting on the channel_init_wait.
	 */
	nvscdev = rcu_dereference_raw(ndev_ctx->nvdev);
	if (!nvscdev || chn_index >= nvscdev->num_chn)
		return;

	nvchan = nvscdev->chan_table + chn_index;

	/* Because the device uses NAPI, all the interrupt batching and
	 * control is done via Net softirq, not the channel handling
	 */
	set_channel_read_mode(new_sc, HV_CALL_ISR);

	/* Set the channel before opening. */
	nvchan->channel = new_sc;

	new_sc->next_request_id_callback = vmbus_next_request_id;
	new_sc->request_addr_callback = vmbus_request_addr;
	new_sc->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
	new_sc->max_pkt_size = NETVSC_MAX_PKT_SIZE;

	ret = vmbus_open(new_sc, netvsc_ring_bytes,
			 netvsc_ring_bytes, NULL, 0,
			 netvsc_channel_cb, nvchan);
	if (ret == 0)
		napi_enable(&nvchan->napi);
	else
		netdev_notice(ndev, "sub channel open failed: %d\n", ret);

	if (atomic_inc_return(&nvscdev->open_chn) == nvscdev->num_chn)
		wake_up(&nvscdev->subchan_open);
}

/* Open sub-channels after completing the handling of the device probe.
 * This breaks overlap of processing the host message for the
 * new primary channel with the initialization of sub-channels.
 */
int rndis_set_subchannel(struct net_device *ndev,
			 struct netvsc_device *nvdev,
			 struct netvsc_device_info *dev_info)
{
	struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hv_dev = ndev_ctx->device_ctx;
	struct rndis_device *rdev = nvdev->extension;
	int i, ret;

	ASSERT_RTNL();

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
	init_packet->msg.v5_msg.subchn_req.op = NVSP_SUBCHANNEL_ALLOCATE;
	init_packet->msg.v5_msg.subchn_req.num_subchannels =
		nvdev->num_chn - 1;
	trace_nvsp_send(ndev, init_packet);

	ret = vmbus_sendpacket(hv_dev->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret) {
		netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
		return ret;
	}

	wait_for_completion(&nvdev->channel_init_wait);
	if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "sub channel request failed\n");
		return -EIO;
	}

	/* Check that number of allocated sub channel is within the expected range */
	if (init_packet->msg.v5_msg.subchn_comp.num_subchannels > nvdev->num_chn - 1) {
		netdev_err(ndev, "invalid number of allocated sub channel\n");
		return -EINVAL;
	}
	nvdev->num_chn = 1 +
		init_packet->msg.v5_msg.subchn_comp.num_subchannels;

	/* wait for all sub channels to open */
	wait_event(nvdev->subchan_open,
		   atomic_read(&nvdev->open_chn) == nvdev->num_chn);

	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		ndev_ctx->tx_table[i] = i % nvdev->num_chn;

	/* ignore failures from setting rss parameters, still have channels */
	if (dev_info)
		rndis_filter_set_rss_param(rdev, dev_info->rss_key);
	else
		rndis_filter_set_rss_param(rdev, netvsc_hash_key);

	netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
	netif_set_real_num_rx_queues(ndev, nvdev->num_chn);

	return 0;
}

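/*
 * Query the host's offload capabilities and translate them into netdev
 * hw_features plus an ndis_offload_params set request: TCP/UDP checksum,
 * TSO (bounded by the host's size limits), receive hashing and RSC/LRO.
 */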
static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
				   struct netvsc_device *nvdev)
{
	struct net_device *net = rndis_device->ndev;
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct ndis_offload hwcaps;
	struct ndis_offload_params offloads;
	unsigned int gso_max_size = GSO_LEGACY_MAX_SIZE;
	int ret;

	/* Find HW offload capabilities */
	ret = rndis_query_hwcaps(rndis_device, nvdev, &hwcaps);
	if (ret != 0)
		return ret;

	/* A value of zero means "no change"; now turn on what we want. */
	memset(&offloads, 0, sizeof(struct ndis_offload_params));

	/* Linux does not care about IP checksum, always does in kernel */
	offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;

	/* Reset previously set hw_features flags */
	net->hw_features &= ~NETVSC_SUPPORTED_HW_FEATURES;
	net_device_ctx->tx_checksum_mask = 0;

	/* Compute tx offload settings based on hw capabilities */
	net->hw_features |= NETIF_F_RXCSUM;
	net->hw_features |= NETIF_F_SG;
	net->hw_features |= NETIF_F_RXHASH;

	if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
		/* Can checksum TCP */
		net->hw_features |= NETIF_F_IP_CSUM;
		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;

		offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;

		if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
			offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
			net->hw_features |= NETIF_F_TSO;

			if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
				gso_max_size = hwcaps.lsov2.ip4_maxsz;
		}

		if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
			offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
		}
	}

	if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
		net->hw_features |= NETIF_F_IPV6_CSUM;

		offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
		net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;

		if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
		    (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
			offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
			net->hw_features |= NETIF_F_TSO6;

			if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
				gso_max_size = hwcaps.lsov2.ip6_maxsz;
		}

		if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
			offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
			net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
		}
	}

	if (hwcaps.rsc.ip4 && hwcaps.rsc.ip6) {
		net->hw_features |= NETIF_F_LRO;

		if (net->features & NETIF_F_LRO) {
			offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
			offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
		} else {
			offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
			offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
		}
	}

	/* In case some hw_features disappeared we need to remove them from
	 * net->features list as they're no longer supported.
	 */
	net->features &= ~NETVSC_SUPPORTED_HW_FEATURES | net->hw_features;

	netif_set_tso_max_size(net, gso_max_size);

	ret = rndis_filter_set_offload_params(net, nvdev, &offloads);

	return ret;
}

static void rndis_get_friendly_name(struct net_device *net,
				    struct rndis_device *rndis_device,
				    struct netvsc_device *net_device)
{
	ucs2_char_t wname[256];
	unsigned long len;
	u8 ifalias[256];
	u32 size;

	size = sizeof(wname);
	if (rndis_filter_query_device(rndis_device, net_device,
				      RNDIS_OID_GEN_FRIENDLY_NAME,
				      wname, &size) != 0)
		return;	/* ignore if host does not support */

	if (size == 0)
		return;	/* name not set */

	/* Convert Windows Unicode string to UTF-8 */
	len = ucs2_as_utf8(ifalias, wname, sizeof(ifalias));

	/* ignore the default value from host */
	if (strcmp(ifalias, "Network Adapter") != 0)
		dev_set_alias(net, ifalias, len);
}

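/*
 * Bring up the RNDIS side of a netvsc device: create the netvsc channel,
 * run the RNDIS init handshake, query MTU/MAC/link state and offload
 * capabilities, and (on NVSP 5+) size the vRSS receive queues and set up
 * the sub-channels.
 */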
struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
					      struct netvsc_device_info *device_info)
{
	struct net_device *net = hv_get_drvdata(dev);
	struct net_device_context *ndc = netdev_priv(net);
	struct netvsc_device *net_device;
	struct rndis_device *rndis_device;
	struct ndis_recv_scale_cap rsscap;
	u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
	u32 mtu, size;
	u32 num_possible_rss_qs;
	int i, ret;

	rndis_device = get_rndis_device();
	if (!rndis_device)
		return ERR_PTR(-ENODEV);

	/* Let the inner driver handle this first to create the netvsc channel
	 * NOTE! Once the channel is created, we may get a receive callback
	 * (RndisFilterOnReceive()) before this call is completed
	 */
	net_device = netvsc_device_add(dev, device_info);
	if (IS_ERR(net_device)) {
		kfree(rndis_device);
		return net_device;
	}

	/* Initialize the rndis device */
	net_device->max_chn = 1;
	net_device->num_chn = 1;

	net_device->extension = rndis_device;
	rndis_device->ndev = net;

	/* Send the rndis initialization message */
	ret = rndis_filter_init_device(rndis_device, net_device);
	if (ret != 0)
		goto err_dev_remv;

	/* Get the MTU from the host */
	size = sizeof(u32);
	ret = rndis_filter_query_device(rndis_device, net_device,
					RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
					&mtu, &size);
	if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
		net->mtu = mtu;

	/* Get the mac address */
	ret = rndis_filter_query_device_mac(rndis_device, net_device);
	if (ret != 0)
		goto err_dev_remv;

	memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);

	/* Get friendly name as ifalias */
	if (!net->ifalias)
		rndis_get_friendly_name(net, rndis_device, net_device);

	/* Query and set hardware capabilities */
	ret = rndis_netdev_set_hwcaps(rndis_device, net_device);
	if (ret != 0)
		goto err_dev_remv;

	rndis_filter_query_device_link_status(rndis_device, net_device);

	netdev_dbg(net, "Device MAC %pM link state %s\n",
		   rndis_device->hw_mac_adr,
		   rndis_device->link_state ? "down" : "up");

	if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		goto out;

	rndis_filter_query_link_speed(rndis_device, net_device);

	/* vRSS setup */
	memset(&rsscap, 0, rsscap_size);
	ret = rndis_filter_query_device(rndis_device, net_device,
					OID_GEN_RECEIVE_SCALE_CAPABILITIES,
					&rsscap, &rsscap_size);
	if (ret || rsscap.num_recv_que < 2)
		goto out;

	if (rsscap.num_indirect_tabent &&
	    rsscap.num_indirect_tabent <= ITAB_NUM_MAX)
		ndc->rx_table_sz = rsscap.num_indirect_tabent;
	else
		ndc->rx_table_sz = ITAB_NUM;

	ndc->rx_table = kcalloc(ndc->rx_table_sz, sizeof(u16), GFP_KERNEL);
	if (!ndc->rx_table) {
		ret = -ENOMEM;
		goto err_dev_remv;
	}

	/* This guarantees that num_possible_rss_qs <= num_online_cpus */
	num_possible_rss_qs = min_t(u32, num_online_cpus(),
				    rsscap.num_recv_que);

	net_device->max_chn = min_t(u32, VRSS_CHANNEL_MAX, num_possible_rss_qs);

	/* We will use the given number of channels if available. */
	net_device->num_chn = min(net_device->max_chn, device_info->num_chn);

	if (!netif_is_rxfh_configured(net)) {
		for (i = 0; i < ndc->rx_table_sz; i++)
			ndc->rx_table[i] = ethtool_rxfh_indir_default(
						i, net_device->num_chn);
	}

	atomic_set(&net_device->open_chn, 1);
	vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);

	for (i = 1; i < net_device->num_chn; i++) {
		ret = netvsc_alloc_recv_comp_ring(net_device, i);
		if (ret) {
			while (--i != 0)
				vfree(net_device->chan_table[i].mrc.slots);
			goto out;
		}
	}

	for (i = 1; i < net_device->num_chn; i++)
		netif_napi_add(net, &net_device->chan_table[i].napi,
			       netvsc_poll);

	return net_device;

out:
	/* setting up multiple channels failed */
	net_device->max_chn = 1;
	net_device->num_chn = 1;
	return net_device;

err_dev_remv:
	rndis_filter_device_remove(dev, net_device);
	return ERR_PTR(ret);
}

void rndis_filter_device_remove(struct hv_device *dev,
				struct netvsc_device *net_dev)
{
	struct rndis_device *rndis_dev = net_dev->extension;
	struct net_device *net = hv_get_drvdata(dev);
	struct net_device_context *ndc;

	ndc = netdev_priv(net);

	/* Halt and release the rndis device */
	rndis_filter_halt_device(net_dev, rndis_dev);

	netvsc_device_remove(dev);

	ndc->rx_table_sz = 0;
	kfree(ndc->rx_table);
	ndc->rx_table = NULL;
}

int rndis_filter_open(struct netvsc_device *nvdev)
{
	if (!nvdev)
		return -EINVAL;

	return rndis_filter_open_device(nvdev->extension);
}

int rndis_filter_close(struct netvsc_device *nvdev)
{
	if (!nvdev)
		return -EINVAL;

	return rndis_filter_close_device(nvdev->extension);
}
