1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Configfs interface for the NVMe target.
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5 */
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7#include <linux/kstrtox.h>
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/slab.h>
11#include <linux/stat.h>
12#include <linux/ctype.h>
13#include <linux/pci.h>
14#include <linux/pci-p2pdma.h>
15#ifdef CONFIG_NVME_TARGET_AUTH
16#include <linux/nvme-auth.h>
17#endif
18#include <linux/nvme-keyring.h>
19#include <crypto/hash.h>
20#include <crypto/kpp.h>
21#include <linux/nospec.h>
22
23#include "nvmet.h"
24
/* Forward declarations: the symlink handlers below type-check against these. */
static const struct config_item_type nvmet_host_type;
static const struct config_item_type nvmet_subsys_type;

/* All configured ports; exported to the rest of nvmet via nvmet_ports. */
static LIST_HEAD(nvmet_ports_list);
struct list_head *nvmet_ports = &nvmet_ports_list;

/* Pairs an on-the-wire discovery-log byte value with its configfs name. */
struct nvmet_type_name_map {
	u8 type;
	const char *name;
};
35
/* Transport types selectable through the addr_trtype attribute. */
static struct nvmet_type_name_map nvmet_transport[] = {
	{ NVMF_TRTYPE_RDMA,	"rdma" },
	{ NVMF_TRTYPE_FC,	"fc" },
	{ NVMF_TRTYPE_TCP,	"tcp" },
	{ NVMF_TRTYPE_LOOP,	"loop" },
};

/*
 * Address families for the addr_adrfam attribute.
 * NOTE(review): the adrfam show/store loops iterate from index 1, so the
 * "pcie" entry is never matched — confirm it is intentionally display-only.
 */
static const struct nvmet_type_name_map nvmet_addr_family[] = {
	{ NVMF_ADDR_FAMILY_PCI,		"pcie" },
	{ NVMF_ADDR_FAMILY_IP4,		"ipv4" },
	{ NVMF_ADDR_FAMILY_IP6,		"ipv6" },
	{ NVMF_ADDR_FAMILY_IB,		"ib" },
	{ NVMF_ADDR_FAMILY_FC,		"fc" },
	{ NVMF_ADDR_FAMILY_LOOP,	"loop" },
};
51
52static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
53{
54 if (p->enabled)
55 pr_err("Disable port '%u' before changing attribute in %s\n",
56 le16_to_cpu(p->disc_addr.portid), caller);
57 return p->enabled;
58}
59
60/*
61 * nvmet_port Generic ConfigFS definitions.
62 * Used in any place in the ConfigFS tree that refers to an address.
63 */
64static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
65{
66 u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
67 int i;
68
69 for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
70 if (nvmet_addr_family[i].type == adrfam)
71 return snprintf(buf: page, PAGE_SIZE, fmt: "%s\n",
72 nvmet_addr_family[i].name);
73 }
74
75 return snprintf(buf: page, PAGE_SIZE, fmt: "\n");
76}
77
78static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
79 const char *page, size_t count)
80{
81 struct nvmet_port *port = to_nvmet_port(item);
82 int i;
83
84 if (nvmet_is_port_enabled(p: port, caller: __func__))
85 return -EACCES;
86
87 for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
88 if (sysfs_streq(s1: page, s2: nvmet_addr_family[i].name))
89 goto found;
90 }
91
92 pr_err("Invalid value '%s' for adrfam\n", page);
93 return -EINVAL;
94
95found:
96 port->disc_addr.adrfam = nvmet_addr_family[i].type;
97 return count;
98}
99
100CONFIGFS_ATTR(nvmet_, addr_adrfam);
101
102static ssize_t nvmet_addr_portid_show(struct config_item *item,
103 char *page)
104{
105 __le16 portid = to_nvmet_port(item)->disc_addr.portid;
106
107 return snprintf(buf: page, PAGE_SIZE, fmt: "%d\n", le16_to_cpu(portid));
108}
109
110static ssize_t nvmet_addr_portid_store(struct config_item *item,
111 const char *page, size_t count)
112{
113 struct nvmet_port *port = to_nvmet_port(item);
114 u16 portid = 0;
115
116 if (kstrtou16(s: page, base: 0, res: &portid)) {
117 pr_err("Invalid value '%s' for portid\n", page);
118 return -EINVAL;
119 }
120
121 if (nvmet_is_port_enabled(p: port, caller: __func__))
122 return -EACCES;
123
124 port->disc_addr.portid = cpu_to_le16(portid);
125 return count;
126}
127
128CONFIGFS_ATTR(nvmet_, addr_portid);
129
130static ssize_t nvmet_addr_traddr_show(struct config_item *item,
131 char *page)
132{
133 struct nvmet_port *port = to_nvmet_port(item);
134
135 return snprintf(buf: page, PAGE_SIZE, fmt: "%s\n", port->disc_addr.traddr);
136}
137
/*
 * Set the transport address.  Only allowed while the port is disabled.
 */
static ssize_t nvmet_addr_traddr_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (count > NVMF_TRADDR_SIZE) {
		pr_err("Invalid value '%s' for traddr\n", page);
		return -EINVAL;
	}

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	/*
	 * NOTE(review): "%s" copies up to count bytes plus a NUL terminator;
	 * an input of exactly NVMF_TRADDR_SIZE non-whitespace bytes (no
	 * trailing newline) would overflow disc_addr.traddr by one byte —
	 * confirm the buffer has headroom or tighten the bound above.
	 */
	if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
		return -EINVAL;
	return count;
}
155
156CONFIGFS_ATTR(nvmet_, addr_traddr);
157
/* TREQ (transport requirements) values selectable via addr_treq. */
static const struct nvmet_type_name_map nvmet_addr_treq[] = {
	{ NVMF_TREQ_NOT_SPECIFIED,	"not specified" },
	{ NVMF_TREQ_REQUIRED,		"required" },
	{ NVMF_TREQ_NOT_REQUIRED,	"not required" },
};
163
164static inline u8 nvmet_port_disc_addr_treq_mask(struct nvmet_port *port)
165{
166 return (port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK);
167}
168
169static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
170{
171 u8 treq = nvmet_port_disc_addr_treq_secure_channel(port: to_nvmet_port(item));
172 int i;
173
174 for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
175 if (treq == nvmet_addr_treq[i].type)
176 return snprintf(buf: page, PAGE_SIZE, fmt: "%s\n",
177 nvmet_addr_treq[i].name);
178 }
179
180 return snprintf(buf: page, PAGE_SIZE, fmt: "\n");
181}
182
/*
 * Set the secure-channel portion of TREQ.  Rejected while the port is
 * enabled.  When the port is TCP with TLS1.3 configured, "not specified"
 * is refused outright and "not required" only warns, since it permits
 * non-TLS connections alongside TLS.
 */
static ssize_t nvmet_addr_treq_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	/* Preserve the non-secure-channel bits; only the low bits change. */
	u8 treq = nvmet_port_disc_addr_treq_mask(port);
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
		if (sysfs_streq(page, nvmet_addr_treq[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for treq\n", page);
	return -EINVAL;

found:
	if (port->disc_addr.trtype == NVMF_TRTYPE_TCP &&
	    port->disc_addr.tsas.tcp.sectype == NVMF_TCP_SECTYPE_TLS13) {
		switch (nvmet_addr_treq[i].type) {
		case NVMF_TREQ_NOT_SPECIFIED:
			pr_debug("treq '%s' not allowed for TLS1.3\n",
				 nvmet_addr_treq[i].name);
			return -EINVAL;
		case NVMF_TREQ_NOT_REQUIRED:
			pr_warn("Allow non-TLS connections while TLS1.3 is enabled\n");
			break;
		default:
			break;
		}
	}
	treq |= nvmet_addr_treq[i].type;
	port->disc_addr.treq = treq;
	return count;
}
220
221CONFIGFS_ATTR(nvmet_, addr_treq);
222
223static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
224 char *page)
225{
226 struct nvmet_port *port = to_nvmet_port(item);
227
228 return snprintf(buf: page, PAGE_SIZE, fmt: "%s\n", port->disc_addr.trsvcid);
229}
230
/*
 * Set the transport service ID.  Only allowed while the port is disabled.
 */
static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);

	if (count > NVMF_TRSVCID_SIZE) {
		pr_err("Invalid value '%s' for trsvcid\n", page);
		return -EINVAL;
	}
	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	/*
	 * NOTE(review): same terminator edge case as traddr — an input of
	 * exactly NVMF_TRSVCID_SIZE non-whitespace bytes writes one byte past
	 * disc_addr.trsvcid for the NUL; confirm the bound is safe.
	 */
	if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
		return -EINVAL;
	return count;
}
247
248CONFIGFS_ATTR(nvmet_, addr_trsvcid);
249
250static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
251 char *page)
252{
253 struct nvmet_port *port = to_nvmet_port(item);
254
255 return snprintf(buf: page, PAGE_SIZE, fmt: "%d\n", port->inline_data_size);
256}
257
258static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
259 const char *page, size_t count)
260{
261 struct nvmet_port *port = to_nvmet_port(item);
262 int ret;
263
264 if (nvmet_is_port_enabled(p: port, caller: __func__))
265 return -EACCES;
266 ret = kstrtoint(s: page, base: 0, res: &port->inline_data_size);
267 if (ret) {
268 pr_err("Invalid value '%s' for inline_data_size\n", page);
269 return -EINVAL;
270 }
271 return count;
272}
273
274CONFIGFS_ATTR(nvmet_, param_inline_data_size);
275
276static ssize_t nvmet_param_max_queue_size_show(struct config_item *item,
277 char *page)
278{
279 struct nvmet_port *port = to_nvmet_port(item);
280
281 return snprintf(buf: page, PAGE_SIZE, fmt: "%d\n", port->max_queue_size);
282}
283
284static ssize_t nvmet_param_max_queue_size_store(struct config_item *item,
285 const char *page, size_t count)
286{
287 struct nvmet_port *port = to_nvmet_port(item);
288 int ret;
289
290 if (nvmet_is_port_enabled(p: port, caller: __func__))
291 return -EACCES;
292 ret = kstrtoint(s: page, base: 0, res: &port->max_queue_size);
293 if (ret) {
294 pr_err("Invalid value '%s' for max_queue_size\n", page);
295 return -EINVAL;
296 }
297 return count;
298}
299
300CONFIGFS_ATTR(nvmet_, param_max_queue_size);
301
302#ifdef CONFIG_BLK_DEV_INTEGRITY
303static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
304 char *page)
305{
306 struct nvmet_port *port = to_nvmet_port(item);
307
308 return snprintf(buf: page, PAGE_SIZE, fmt: "%d\n", port->pi_enable);
309}
310
311static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
312 const char *page, size_t count)
313{
314 struct nvmet_port *port = to_nvmet_port(item);
315 bool val;
316
317 if (kstrtobool(s: page, res: &val))
318 return -EINVAL;
319
320 if (nvmet_is_port_enabled(p: port, caller: __func__))
321 return -EACCES;
322
323 port->pi_enable = val;
324 return count;
325}
326
327CONFIGFS_ATTR(nvmet_, param_pi_enable);
328#endif
329
330static ssize_t nvmet_addr_trtype_show(struct config_item *item,
331 char *page)
332{
333 struct nvmet_port *port = to_nvmet_port(item);
334 int i;
335
336 for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
337 if (port->disc_addr.trtype == nvmet_transport[i].type)
338 return snprintf(buf: page, PAGE_SIZE,
339 fmt: "%s\n", nvmet_transport[i].name);
340 }
341
342 return sprintf(buf: page, fmt: "\n");
343}
344
345static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
346{
347 port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
348 port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
349 port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
350}
351
352static void nvmet_port_init_tsas_tcp(struct nvmet_port *port, int sectype)
353{
354 port->disc_addr.tsas.tcp.sectype = sectype;
355}
356
357static ssize_t nvmet_addr_trtype_store(struct config_item *item,
358 const char *page, size_t count)
359{
360 struct nvmet_port *port = to_nvmet_port(item);
361 int i;
362
363 if (nvmet_is_port_enabled(p: port, caller: __func__))
364 return -EACCES;
365
366 for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
367 if (sysfs_streq(s1: page, s2: nvmet_transport[i].name))
368 goto found;
369 }
370
371 pr_err("Invalid value '%s' for trtype\n", page);
372 return -EINVAL;
373
374found:
375 memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
376 port->disc_addr.trtype = nvmet_transport[i].type;
377 if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
378 nvmet_port_init_tsas_rdma(port);
379 else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP)
380 nvmet_port_init_tsas_tcp(port, sectype: NVMF_TCP_SECTYPE_NONE);
381 return count;
382}
383
384CONFIGFS_ATTR(nvmet_, addr_trtype);
385
/* TCP security types selectable via addr_tsas on a TCP port. */
static const struct nvmet_type_name_map nvmet_addr_tsas_tcp[] = {
	{ NVMF_TCP_SECTYPE_NONE,	"none" },
	{ NVMF_TCP_SECTYPE_TLS13,	"tls1.3" },
};

/* RDMA queue-pair types; display-only (addr_tsas store is TCP-only). */
static const struct nvmet_type_name_map nvmet_addr_tsas_rdma[] = {
	{ NVMF_RDMA_QPTYPE_CONNECTED,	"connected" },
	{ NVMF_RDMA_QPTYPE_DATAGRAM,	"datagram" },
};
395
396static ssize_t nvmet_addr_tsas_show(struct config_item *item,
397 char *page)
398{
399 struct nvmet_port *port = to_nvmet_port(item);
400 int i;
401
402 if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) {
403 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
404 if (port->disc_addr.tsas.tcp.sectype == nvmet_addr_tsas_tcp[i].type)
405 return sprintf(buf: page, fmt: "%s\n", nvmet_addr_tsas_tcp[i].name);
406 }
407 } else if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) {
408 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) {
409 if (port->disc_addr.tsas.rdma.qptype == nvmet_addr_tsas_rdma[i].type)
410 return sprintf(buf: page, fmt: "%s\n", nvmet_addr_tsas_rdma[i].name);
411 }
412 }
413 return sprintf(buf: page, fmt: "reserved\n");
414}
415
/*
 * Set the TCP security type ("none" or "tls1.3") via addr_tsas.  Only valid
 * for TCP ports and only while the port is disabled.  Enabling TLS also
 * defaults the TREQ secure-channel requirement to 'required' unless one was
 * already configured.
 */
static ssize_t nvmet_addr_tsas_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	/* Non-secure-channel TREQ bits are preserved across this change. */
	u8 treq = nvmet_port_disc_addr_treq_mask(port);
	u8 sectype;
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	if (port->disc_addr.trtype != NVMF_TRTYPE_TCP)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
		if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) {
			sectype = nvmet_addr_tsas_tcp[i].type;
			goto found;
		}
	}

	pr_err("Invalid value '%s' for tsas\n", page);
	return -EINVAL;

found:
	if (sectype == NVMF_TCP_SECTYPE_TLS13) {
		/* TLS needs kernel support and a keyring configured first. */
		if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS)) {
			pr_err("TLS is not supported\n");
			return -EINVAL;
		}
		if (!port->keyring) {
			pr_err("TLS keyring not configured\n");
			return -EINVAL;
		}
	}

	nvmet_port_init_tsas_tcp(port, sectype);
	/*
	 * If TLS is enabled TREQ should be set to 'required' per default
	 */
	if (sectype == NVMF_TCP_SECTYPE_TLS13) {
		u8 sc = nvmet_port_disc_addr_treq_secure_channel(port);

		if (sc == NVMF_TREQ_NOT_SPECIFIED)
			treq |= NVMF_TREQ_REQUIRED;
		else
			treq |= sc;
	} else {
		/* NVMF_TREQ_NOT_SPECIFIED is 0; this keeps treq unchanged. */
		treq |= NVMF_TREQ_NOT_SPECIFIED;
	}
	port->disc_addr.treq = treq;
	return count;
}
469
470CONFIGFS_ATTR(nvmet_, addr_tsas);
471
472/*
473 * Namespace structures & file operation functions below
474 */
475static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
476{
477 return sprintf(buf: page, fmt: "%s\n", to_nvmet_ns(item)->device_path);
478}
479
/*
 * Set the backing device path.  The namespace must be disabled; the path is
 * duplicated up to (excluding) the first newline.  ns->device_path is owned
 * here: the old string is freed before the new one is installed (it is left
 * NULL if the allocation fails).
 */
static ssize_t nvmet_ns_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);
	ret = -EBUSY;
	if (ns->enabled)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");
	if (!len)
		goto out_unlock;

	kfree(ns->device_path);
	ret = -ENOMEM;
	ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
	if (!ns->device_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);
	return count;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}
511
512CONFIGFS_ATTR(nvmet_ns_, device_path);
513
514#ifdef CONFIG_PCI_P2PDMA
515static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
516{
517 struct nvmet_ns *ns = to_nvmet_ns(item);
518
519 return pci_p2pdma_enable_show(page, p2p_dev: ns->p2p_dev, use_p2pdma: ns->use_p2pmem);
520}
521
/*
 * Configure peer-to-peer memory for the namespace.  Must be disabled first.
 * pci_p2pdma_enable_store() returns a referenced p2p_dev (or NULL); the
 * reference on any previously selected device is dropped before the new one
 * is installed.
 */
static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct pci_dev *p2p_dev = NULL;
	bool use_p2pmem;
	int ret = count;
	int error;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
	if (error) {
		ret = error;
		goto out_unlock;
	}

	ns->use_p2pmem = use_p2pmem;
	pci_dev_put(ns->p2p_dev);	/* release the old device's reference */
	ns->p2p_dev = p2p_dev;

out_unlock:
	mutex_unlock(&ns->subsys->lock);

	return ret;
}
552
553CONFIGFS_ATTR(nvmet_ns_, p2pmem);
554#endif /* CONFIG_PCI_P2PDMA */
555
556static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
557{
558 return sprintf(buf: page, fmt: "%pUb\n", &to_nvmet_ns(item)->uuid);
559}
560
561static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
562 const char *page, size_t count)
563{
564 struct nvmet_ns *ns = to_nvmet_ns(item);
565 struct nvmet_subsys *subsys = ns->subsys;
566 int ret = 0;
567
568 mutex_lock(&subsys->lock);
569 if (ns->enabled) {
570 ret = -EBUSY;
571 goto out_unlock;
572 }
573
574 if (uuid_parse(uuid: page, u: &ns->uuid))
575 ret = -EINVAL;
576
577out_unlock:
578 mutex_unlock(lock: &subsys->lock);
579 return ret ? ret : count;
580}
581
582CONFIGFS_ATTR(nvmet_ns_, device_uuid);
583
584static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
585{
586 return sprintf(buf: page, fmt: "%pUb\n", &to_nvmet_ns(item)->nguid);
587}
588
589static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
590 const char *page, size_t count)
591{
592 struct nvmet_ns *ns = to_nvmet_ns(item);
593 struct nvmet_subsys *subsys = ns->subsys;
594 u8 nguid[16];
595 const char *p = page;
596 int i;
597 int ret = 0;
598
599 mutex_lock(&subsys->lock);
600 if (ns->enabled) {
601 ret = -EBUSY;
602 goto out_unlock;
603 }
604
605 for (i = 0; i < 16; i++) {
606 if (p + 2 > page + count) {
607 ret = -EINVAL;
608 goto out_unlock;
609 }
610 if (!isxdigit(p[0]) || !isxdigit(p[1])) {
611 ret = -EINVAL;
612 goto out_unlock;
613 }
614
615 nguid[i] = (hex_to_bin(ch: p[0]) << 4) | hex_to_bin(ch: p[1]);
616 p += 2;
617
618 if (*p == '-' || *p == ':')
619 p++;
620 }
621
622 memcpy(&ns->nguid, nguid, sizeof(nguid));
623out_unlock:
624 mutex_unlock(lock: &subsys->lock);
625 return ret ? ret : count;
626}
627
628CONFIGFS_ATTR(nvmet_ns_, device_nguid);
629
630static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
631{
632 return sprintf(buf: page, fmt: "%u\n", to_nvmet_ns(item)->anagrpid);
633}
634
635static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
636 const char *page, size_t count)
637{
638 struct nvmet_ns *ns = to_nvmet_ns(item);
639 u32 oldgrpid, newgrpid;
640 int ret;
641
642 ret = kstrtou32(s: page, base: 0, res: &newgrpid);
643 if (ret)
644 return ret;
645
646 if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
647 return -EINVAL;
648
649 down_write(sem: &nvmet_ana_sem);
650 oldgrpid = ns->anagrpid;
651 newgrpid = array_index_nospec(newgrpid, NVMET_MAX_ANAGRPS);
652 nvmet_ana_group_enabled[newgrpid]++;
653 ns->anagrpid = newgrpid;
654 nvmet_ana_group_enabled[oldgrpid]--;
655 nvmet_ana_chgcnt++;
656 up_write(sem: &nvmet_ana_sem);
657
658 nvmet_send_ana_event(subsys: ns->subsys, NULL);
659 return count;
660}
661
662CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
663
664static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
665{
666 return sprintf(buf: page, fmt: "%d\n", to_nvmet_ns(item)->enabled);
667}
668
669static ssize_t nvmet_ns_enable_store(struct config_item *item,
670 const char *page, size_t count)
671{
672 struct nvmet_ns *ns = to_nvmet_ns(item);
673 bool enable;
674 int ret = 0;
675
676 if (kstrtobool(s: page, res: &enable))
677 return -EINVAL;
678
679 if (enable)
680 ret = nvmet_ns_enable(ns);
681 else
682 nvmet_ns_disable(ns);
683
684 return ret ? ret : count;
685}
686
687CONFIGFS_ATTR(nvmet_ns_, enable);
688
689static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
690{
691 return sprintf(buf: page, fmt: "%d\n", to_nvmet_ns(item)->buffered_io);
692}
693
694static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
695 const char *page, size_t count)
696{
697 struct nvmet_ns *ns = to_nvmet_ns(item);
698 bool val;
699
700 if (kstrtobool(s: page, res: &val))
701 return -EINVAL;
702
703 mutex_lock(&ns->subsys->lock);
704 if (ns->enabled) {
705 pr_err("disable ns before setting buffered_io value.\n");
706 mutex_unlock(lock: &ns->subsys->lock);
707 return -EINVAL;
708 }
709
710 ns->buffered_io = val;
711 mutex_unlock(lock: &ns->subsys->lock);
712 return count;
713}
714
715CONFIGFS_ATTR(nvmet_ns_, buffered_io);
716
717static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
718 const char *page, size_t count)
719{
720 struct nvmet_ns *ns = to_nvmet_ns(item);
721 bool val;
722
723 if (kstrtobool(s: page, res: &val))
724 return -EINVAL;
725
726 if (!val)
727 return -EINVAL;
728
729 mutex_lock(&ns->subsys->lock);
730 if (!ns->enabled) {
731 pr_err("enable ns before revalidate.\n");
732 mutex_unlock(lock: &ns->subsys->lock);
733 return -EINVAL;
734 }
735 if (nvmet_ns_revalidate(ns))
736 nvmet_ns_changed(subsys: ns->subsys, nsid: ns->nsid);
737 mutex_unlock(lock: &ns->subsys->lock);
738 return count;
739}
740
741CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
742
/* Attribute files exposed under each namespace directory. */
static struct configfs_attribute *nvmet_ns_attrs[] = {
	&nvmet_ns_attr_device_path,
	&nvmet_ns_attr_device_nguid,
	&nvmet_ns_attr_device_uuid,
	&nvmet_ns_attr_ana_grpid,
	&nvmet_ns_attr_enable,
	&nvmet_ns_attr_buffered_io,
	&nvmet_ns_attr_revalidate_size,
#ifdef CONFIG_PCI_P2PDMA
	&nvmet_ns_attr_p2pmem,
#endif
	NULL,
};
756
/* configfs release callback: free the namespace when its item goes away. */
static void nvmet_ns_release(struct config_item *item)
{
	nvmet_ns_free(to_nvmet_ns(item));
}
763
/* Item operations and type for a single namespace directory. */
static struct configfs_item_operations nvmet_ns_item_ops = {
	.release		= nvmet_ns_release,
};

static const struct config_item_type nvmet_ns_type = {
	.ct_item_ops		= &nvmet_ns_item_ops,
	.ct_attrs		= nvmet_ns_attrs,
	.ct_owner		= THIS_MODULE,
};
773
774static struct config_group *nvmet_ns_make(struct config_group *group,
775 const char *name)
776{
777 struct nvmet_subsys *subsys = namespaces_to_subsys(item: &group->cg_item);
778 struct nvmet_ns *ns;
779 int ret;
780 u32 nsid;
781
782 ret = kstrtou32(s: name, base: 0, res: &nsid);
783 if (ret)
784 goto out;
785
786 ret = -EINVAL;
787 if (nsid == 0 || nsid == NVME_NSID_ALL) {
788 pr_err("invalid nsid %#x", nsid);
789 goto out;
790 }
791
792 ret = -ENOMEM;
793 ns = nvmet_ns_alloc(subsys, nsid);
794 if (!ns)
795 goto out;
796 config_group_init_type_name(group: &ns->group, name, type: &nvmet_ns_type);
797
798 pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);
799
800 return &ns->group;
801out:
802 return ERR_PTR(error: ret);
803}
804
/* Group operations and type for the per-subsystem "namespaces" directory. */
static struct configfs_group_operations nvmet_namespaces_group_ops = {
	.make_group		= nvmet_ns_make,
};

static const struct config_item_type nvmet_namespaces_type = {
	.ct_group_ops		= &nvmet_namespaces_group_ops,
	.ct_owner		= THIS_MODULE,
};
813
814#ifdef CONFIG_NVME_TARGET_PASSTHRU
815
816static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
817 char *page)
818{
819 struct nvmet_subsys *subsys = to_subsys(item: item->ci_parent);
820
821 return snprintf(buf: page, PAGE_SIZE, fmt: "%s\n", subsys->passthru_ctrl_path);
822}
823
824static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
825 const char *page, size_t count)
826{
827 struct nvmet_subsys *subsys = to_subsys(item: item->ci_parent);
828 size_t len;
829 int ret;
830
831 mutex_lock(&subsys->lock);
832
833 ret = -EBUSY;
834 if (subsys->passthru_ctrl)
835 goto out_unlock;
836
837 ret = -EINVAL;
838 len = strcspn(page, "\n");
839 if (!len)
840 goto out_unlock;
841
842 kfree(objp: subsys->passthru_ctrl_path);
843 ret = -ENOMEM;
844 subsys->passthru_ctrl_path = kstrndup(s: page, len, GFP_KERNEL);
845 if (!subsys->passthru_ctrl_path)
846 goto out_unlock;
847
848 mutex_unlock(lock: &subsys->lock);
849
850 return count;
851out_unlock:
852 mutex_unlock(lock: &subsys->lock);
853 return ret;
854}
855CONFIGFS_ATTR(nvmet_passthru_, device_path);
856
857static ssize_t nvmet_passthru_enable_show(struct config_item *item,
858 char *page)
859{
860 struct nvmet_subsys *subsys = to_subsys(item: item->ci_parent);
861
862 return sprintf(buf: page, fmt: "%d\n", subsys->passthru_ctrl ? 1 : 0);
863}
864
865static ssize_t nvmet_passthru_enable_store(struct config_item *item,
866 const char *page, size_t count)
867{
868 struct nvmet_subsys *subsys = to_subsys(item: item->ci_parent);
869 bool enable;
870 int ret = 0;
871
872 if (kstrtobool(s: page, res: &enable))
873 return -EINVAL;
874
875 if (enable)
876 ret = nvmet_passthru_ctrl_enable(subsys);
877 else
878 nvmet_passthru_ctrl_disable(subsys);
879
880 return ret ? ret : count;
881}
882CONFIGFS_ATTR(nvmet_passthru_, enable);
883
884static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
885 char *page)
886{
887 return sprintf(buf: page, fmt: "%u\n", to_subsys(item: item->ci_parent)->admin_timeout);
888}
889
890static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
891 const char *page, size_t count)
892{
893 struct nvmet_subsys *subsys = to_subsys(item: item->ci_parent);
894 unsigned int timeout;
895
896 if (kstrtouint(s: page, base: 0, res: &timeout))
897 return -EINVAL;
898 subsys->admin_timeout = timeout;
899 return count;
900}
901CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);
902
903static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
904 char *page)
905{
906 return sprintf(buf: page, fmt: "%u\n", to_subsys(item: item->ci_parent)->io_timeout);
907}
908
909static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
910 const char *page, size_t count)
911{
912 struct nvmet_subsys *subsys = to_subsys(item: item->ci_parent);
913 unsigned int timeout;
914
915 if (kstrtouint(s: page, base: 0, res: &timeout))
916 return -EINVAL;
917 subsys->io_timeout = timeout;
918 return count;
919}
920CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
921
922static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
923 char *page)
924{
925 return sprintf(buf: page, fmt: "%u\n", to_subsys(item: item->ci_parent)->clear_ids);
926}
927
928static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
929 const char *page, size_t count)
930{
931 struct nvmet_subsys *subsys = to_subsys(item: item->ci_parent);
932 unsigned int clear_ids;
933
934 if (kstrtouint(s: page, base: 0, res: &clear_ids))
935 return -EINVAL;
936 subsys->clear_ids = clear_ids;
937 return count;
938}
939CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
940
/* Attribute files exposed under the per-subsystem "passthru" directory. */
static struct configfs_attribute *nvmet_passthru_attrs[] = {
	&nvmet_passthru_attr_device_path,
	&nvmet_passthru_attr_enable,
	&nvmet_passthru_attr_admin_timeout,
	&nvmet_passthru_attr_io_timeout,
	&nvmet_passthru_attr_clear_ids,
	NULL,
};

static const struct config_item_type nvmet_passthru_type = {
	.ct_attrs		= nvmet_passthru_attrs,
	.ct_owner		= THIS_MODULE,
};
954
955static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
956{
957 config_group_init_type_name(group: &subsys->passthru_group,
958 name: "passthru", type: &nvmet_passthru_type);
959 configfs_add_default_group(new_group: &subsys->passthru_group,
960 group: &subsys->group);
961}
962
963#else /* CONFIG_NVME_TARGET_PASSTHRU */
964
/* Stub when passthru support is compiled out: no passthru directory. */
static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
}
968
969#endif /* CONFIG_NVME_TARGET_PASSTHRU */
970
/*
 * Symlink handler: expose a subsystem on a port by linking it into the
 * port's "subsystems" directory.  The first subsystem linked also enables
 * the port.  All list/port manipulation happens under nvmet_config_sem;
 * the link allocation is done outside the lock and freed on any failure.
 */
static int nvmet_port_subsys_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys;
	struct nvmet_subsys_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_subsys_type) {
		pr_err("can only link subsystems into the subsystems dir.!\n");
		return -EINVAL;
	}
	subsys = to_subsys(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->subsys = subsys;

	down_write(&nvmet_config_sem);
	ret = -EEXIST;
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto out_free_link;
	}

	/* first subsystem on this port: bring the port up */
	if (list_empty(&port->subsystems)) {
		ret = nvmet_enable_port(port);
		if (ret)
			goto out_free_link;
	}

	list_add_tail(&link->entry, &port->subsystems);
	nvmet_port_disc_changed(port, subsys);

	up_write(&nvmet_config_sem);
	return 0;

out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
1013
/*
 * Symlink removal handler: detach a subsystem from a port.  Tears down any
 * controllers for that pairing, signals a discovery change, and disables
 * the port when its last subsystem goes away.  Silently returns if the
 * link is not found (nothing to undo).
 */
static void nvmet_port_subsys_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys = to_subsys(target);
	struct nvmet_subsys_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_port_del_ctrls(port, subsys);
	nvmet_port_disc_changed(port, subsys);

	if (list_empty(&port->subsystems))
		nvmet_disable_port(port);
	up_write(&nvmet_config_sem);
	kfree(p);
}
1039
/* Item ops/type for a port's "subsystems" symlink directory. */
static struct configfs_item_operations nvmet_port_subsys_item_ops = {
	.allow_link		= nvmet_port_subsys_allow_link,
	.drop_link		= nvmet_port_subsys_drop_link,
};

static const struct config_item_type nvmet_port_subsys_type = {
	.ct_item_ops		= &nvmet_port_subsys_item_ops,
	.ct_owner		= THIS_MODULE,
};
1049
/*
 * Symlink handler: add a host to a subsystem's allow list.  Incompatible
 * with allow_any_host; duplicate host NQNs are rejected.  List updates and
 * the discovery-change notification happen under nvmet_config_sem.
 */
static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host;
	struct nvmet_host_link *link, *p;
	int ret;

	if (target->ci_type != &nvmet_host_type) {
		pr_err("can only link hosts into the allowed_hosts directory!\n");
		return -EINVAL;
	}

	host = to_host(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->host = host;

	down_write(&nvmet_config_sem);
	ret = -EINVAL;
	if (subsys->allow_any_host) {
		pr_err("can't add hosts when allow_any_host is set!\n");
		goto out_free_link;
	}

	ret = -EEXIST;
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto out_free_link;
	}
	list_add_tail(&link->entry, &subsys->hosts);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	return 0;
out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
1091
/*
 * Symlink removal handler: remove a host from the subsystem's allow list
 * (matched by host NQN) and signal a discovery change.  Silently returns
 * if the host is not in the list.
 */
static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host = to_host(target);
	struct nvmet_host_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	kfree(p);
}
1114
/* Item ops/type for a subsystem's "allowed_hosts" symlink directory. */
static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
	.allow_link		= nvmet_allowed_hosts_allow_link,
	.drop_link		= nvmet_allowed_hosts_drop_link,
};

static const struct config_item_type nvmet_allowed_hosts_type = {
	.ct_item_ops		= &nvmet_allowed_hosts_item_ops,
	.ct_owner		= THIS_MODULE,
};
1124
1125static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
1126 char *page)
1127{
1128 return snprintf(buf: page, PAGE_SIZE, fmt: "%d\n",
1129 to_subsys(item)->allow_any_host);
1130}
1131
1132static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
1133 const char *page, size_t count)
1134{
1135 struct nvmet_subsys *subsys = to_subsys(item);
1136 bool allow_any_host;
1137 int ret = 0;
1138
1139 if (kstrtobool(s: page, res: &allow_any_host))
1140 return -EINVAL;
1141
1142 down_write(sem: &nvmet_config_sem);
1143 if (allow_any_host && !list_empty(head: &subsys->hosts)) {
1144 pr_err("Can't set allow_any_host when explicit hosts are set!\n");
1145 ret = -EINVAL;
1146 goto out_unlock;
1147 }
1148
1149 if (subsys->allow_any_host != allow_any_host) {
1150 subsys->allow_any_host = allow_any_host;
1151 nvmet_subsys_disc_changed(subsys, NULL);
1152 }
1153
1154out_unlock:
1155 up_write(sem: &nvmet_config_sem);
1156 return ret ? ret : count;
1157}
1158
1159CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
1160
1161static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
1162 char *page)
1163{
1164 struct nvmet_subsys *subsys = to_subsys(item);
1165
1166 if (NVME_TERTIARY(subsys->ver))
1167 return snprintf(buf: page, PAGE_SIZE, fmt: "%llu.%llu.%llu\n",
1168 NVME_MAJOR(subsys->ver),
1169 NVME_MINOR(subsys->ver),
1170 NVME_TERTIARY(subsys->ver));
1171
1172 return snprintf(buf: page, PAGE_SIZE, fmt: "%llu.%llu\n",
1173 NVME_MAJOR(subsys->ver),
1174 NVME_MINOR(subsys->ver));
1175}
1176
1177static ssize_t
1178nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
1179 const char *page, size_t count)
1180{
1181 int major, minor, tertiary = 0;
1182 int ret;
1183
1184 if (subsys->subsys_discovered) {
1185 if (NVME_TERTIARY(subsys->ver))
1186 pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
1187 NVME_MAJOR(subsys->ver),
1188 NVME_MINOR(subsys->ver),
1189 NVME_TERTIARY(subsys->ver));
1190 else
1191 pr_err("Can't set version number. %llu.%llu is already assigned\n",
1192 NVME_MAJOR(subsys->ver),
1193 NVME_MINOR(subsys->ver));
1194 return -EINVAL;
1195 }
1196
1197 /* passthru subsystems use the underlying controller's version */
1198 if (nvmet_is_passthru_subsys(subsys))
1199 return -EINVAL;
1200
1201 ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
1202 if (ret != 2 && ret != 3)
1203 return -EINVAL;
1204
1205 subsys->ver = NVME_VS(major, minor, tertiary);
1206
1207 return count;
1208}
1209
1210static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
1211 const char *page, size_t count)
1212{
1213 struct nvmet_subsys *subsys = to_subsys(item);
1214 ssize_t ret;
1215
1216 down_write(sem: &nvmet_config_sem);
1217 mutex_lock(&subsys->lock);
1218 ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
1219 mutex_unlock(lock: &subsys->lock);
1220 up_write(sem: &nvmet_config_sem);
1221
1222 return ret;
1223}
1224CONFIGFS_ATTR(nvmet_subsys_, attr_version);
1225
/* NVMe 1.4, Section 1.5: ID strings may contain only printable ASCII. */
static bool nvmet_is_ascii(const char c)
{
	return !(c < 0x20 || c > 0x7e);
}
1231
1232static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
1233 char *page)
1234{
1235 struct nvmet_subsys *subsys = to_subsys(item);
1236
1237 return snprintf(buf: page, PAGE_SIZE, fmt: "%.*s\n",
1238 NVMET_SN_MAX_SIZE, subsys->serial);
1239}
1240
1241static ssize_t
1242nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
1243 const char *page, size_t count)
1244{
1245 int pos, len = strcspn(page, "\n");
1246
1247 if (subsys->subsys_discovered) {
1248 pr_err("Can't set serial number. %s is already assigned\n",
1249 subsys->serial);
1250 return -EINVAL;
1251 }
1252
1253 if (!len || len > NVMET_SN_MAX_SIZE) {
1254 pr_err("Serial Number can not be empty or exceed %d Bytes\n",
1255 NVMET_SN_MAX_SIZE);
1256 return -EINVAL;
1257 }
1258
1259 for (pos = 0; pos < len; pos++) {
1260 if (!nvmet_is_ascii(c: page[pos])) {
1261 pr_err("Serial Number must contain only ASCII strings\n");
1262 return -EINVAL;
1263 }
1264 }
1265
1266 memcpy_and_pad(dest: subsys->serial, NVMET_SN_MAX_SIZE, src: page, count: len, pad: ' ');
1267
1268 return count;
1269}
1270
1271static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
1272 const char *page, size_t count)
1273{
1274 struct nvmet_subsys *subsys = to_subsys(item);
1275 ssize_t ret;
1276
1277 down_write(sem: &nvmet_config_sem);
1278 mutex_lock(&subsys->lock);
1279 ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
1280 mutex_unlock(lock: &subsys->lock);
1281 up_write(sem: &nvmet_config_sem);
1282
1283 return ret;
1284}
1285CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
1286
1287static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
1288 char *page)
1289{
1290 return snprintf(buf: page, PAGE_SIZE, fmt: "%u\n", to_subsys(item)->cntlid_min);
1291}
1292
1293static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
1294 const char *page, size_t cnt)
1295{
1296 u16 cntlid_min;
1297
1298 if (sscanf(page, "%hu\n", &cntlid_min) != 1)
1299 return -EINVAL;
1300
1301 if (cntlid_min == 0)
1302 return -EINVAL;
1303
1304 down_write(sem: &nvmet_config_sem);
1305 if (cntlid_min > to_subsys(item)->cntlid_max)
1306 goto out_unlock;
1307 to_subsys(item)->cntlid_min = cntlid_min;
1308 up_write(sem: &nvmet_config_sem);
1309 return cnt;
1310
1311out_unlock:
1312 up_write(sem: &nvmet_config_sem);
1313 return -EINVAL;
1314}
1315CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
1316
1317static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
1318 char *page)
1319{
1320 return snprintf(buf: page, PAGE_SIZE, fmt: "%u\n", to_subsys(item)->cntlid_max);
1321}
1322
1323static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
1324 const char *page, size_t cnt)
1325{
1326 u16 cntlid_max;
1327
1328 if (sscanf(page, "%hu\n", &cntlid_max) != 1)
1329 return -EINVAL;
1330
1331 if (cntlid_max == 0)
1332 return -EINVAL;
1333
1334 down_write(sem: &nvmet_config_sem);
1335 if (cntlid_max < to_subsys(item)->cntlid_min)
1336 goto out_unlock;
1337 to_subsys(item)->cntlid_max = cntlid_max;
1338 up_write(sem: &nvmet_config_sem);
1339 return cnt;
1340
1341out_unlock:
1342 up_write(sem: &nvmet_config_sem);
1343 return -EINVAL;
1344}
1345CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
1346
1347static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
1348 char *page)
1349{
1350 struct nvmet_subsys *subsys = to_subsys(item);
1351
1352 return snprintf(buf: page, PAGE_SIZE, fmt: "%s\n", subsys->model_number);
1353}
1354
1355static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
1356 const char *page, size_t count)
1357{
1358 int pos = 0, len;
1359 char *val;
1360
1361 if (subsys->subsys_discovered) {
1362 pr_err("Can't set model number. %s is already assigned\n",
1363 subsys->model_number);
1364 return -EINVAL;
1365 }
1366
1367 len = strcspn(page, "\n");
1368 if (!len)
1369 return -EINVAL;
1370
1371 if (len > NVMET_MN_MAX_SIZE) {
1372 pr_err("Model number size can not exceed %d Bytes\n",
1373 NVMET_MN_MAX_SIZE);
1374 return -EINVAL;
1375 }
1376
1377 for (pos = 0; pos < len; pos++) {
1378 if (!nvmet_is_ascii(c: page[pos]))
1379 return -EINVAL;
1380 }
1381
1382 val = kmemdup_nul(s: page, len, GFP_KERNEL);
1383 if (!val)
1384 return -ENOMEM;
1385 kfree(objp: subsys->model_number);
1386 subsys->model_number = val;
1387 return count;
1388}
1389
1390static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
1391 const char *page, size_t count)
1392{
1393 struct nvmet_subsys *subsys = to_subsys(item);
1394 ssize_t ret;
1395
1396 down_write(sem: &nvmet_config_sem);
1397 mutex_lock(&subsys->lock);
1398 ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
1399 mutex_unlock(lock: &subsys->lock);
1400 up_write(sem: &nvmet_config_sem);
1401
1402 return ret;
1403}
1404CONFIGFS_ATTR(nvmet_subsys_, attr_model);
1405
1406static ssize_t nvmet_subsys_attr_ieee_oui_show(struct config_item *item,
1407 char *page)
1408{
1409 struct nvmet_subsys *subsys = to_subsys(item);
1410
1411 return sysfs_emit(buf: page, fmt: "0x%06x\n", subsys->ieee_oui);
1412}
1413
1414static ssize_t nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys *subsys,
1415 const char *page, size_t count)
1416{
1417 uint32_t val = 0;
1418 int ret;
1419
1420 if (subsys->subsys_discovered) {
1421 pr_err("Can't set IEEE OUI. 0x%06x is already assigned\n",
1422 subsys->ieee_oui);
1423 return -EINVAL;
1424 }
1425
1426 ret = kstrtou32(s: page, base: 0, res: &val);
1427 if (ret < 0)
1428 return ret;
1429
1430 if (val >= 0x1000000)
1431 return -EINVAL;
1432
1433 subsys->ieee_oui = val;
1434
1435 return count;
1436}
1437
1438static ssize_t nvmet_subsys_attr_ieee_oui_store(struct config_item *item,
1439 const char *page, size_t count)
1440{
1441 struct nvmet_subsys *subsys = to_subsys(item);
1442 ssize_t ret;
1443
1444 down_write(sem: &nvmet_config_sem);
1445 mutex_lock(&subsys->lock);
1446 ret = nvmet_subsys_attr_ieee_oui_store_locked(subsys, page, count);
1447 mutex_unlock(lock: &subsys->lock);
1448 up_write(sem: &nvmet_config_sem);
1449
1450 return ret;
1451}
1452CONFIGFS_ATTR(nvmet_subsys_, attr_ieee_oui);
1453
1454static ssize_t nvmet_subsys_attr_firmware_show(struct config_item *item,
1455 char *page)
1456{
1457 struct nvmet_subsys *subsys = to_subsys(item);
1458
1459 return sysfs_emit(buf: page, fmt: "%s\n", subsys->firmware_rev);
1460}
1461
1462static ssize_t nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys *subsys,
1463 const char *page, size_t count)
1464{
1465 int pos = 0, len;
1466 char *val;
1467
1468 if (subsys->subsys_discovered) {
1469 pr_err("Can't set firmware revision. %s is already assigned\n",
1470 subsys->firmware_rev);
1471 return -EINVAL;
1472 }
1473
1474 len = strcspn(page, "\n");
1475 if (!len)
1476 return -EINVAL;
1477
1478 if (len > NVMET_FR_MAX_SIZE) {
1479 pr_err("Firmware revision size can not exceed %d Bytes\n",
1480 NVMET_FR_MAX_SIZE);
1481 return -EINVAL;
1482 }
1483
1484 for (pos = 0; pos < len; pos++) {
1485 if (!nvmet_is_ascii(c: page[pos]))
1486 return -EINVAL;
1487 }
1488
1489 val = kmemdup_nul(s: page, len, GFP_KERNEL);
1490 if (!val)
1491 return -ENOMEM;
1492
1493 kfree(objp: subsys->firmware_rev);
1494
1495 subsys->firmware_rev = val;
1496
1497 return count;
1498}
1499
1500static ssize_t nvmet_subsys_attr_firmware_store(struct config_item *item,
1501 const char *page, size_t count)
1502{
1503 struct nvmet_subsys *subsys = to_subsys(item);
1504 ssize_t ret;
1505
1506 down_write(sem: &nvmet_config_sem);
1507 mutex_lock(&subsys->lock);
1508 ret = nvmet_subsys_attr_firmware_store_locked(subsys, page, count);
1509 mutex_unlock(lock: &subsys->lock);
1510 up_write(sem: &nvmet_config_sem);
1511
1512 return ret;
1513}
1514CONFIGFS_ATTR(nvmet_subsys_, attr_firmware);
1515
1516#ifdef CONFIG_BLK_DEV_INTEGRITY
1517static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
1518 char *page)
1519{
1520 return snprintf(buf: page, PAGE_SIZE, fmt: "%d\n", to_subsys(item)->pi_support);
1521}
1522
1523static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
1524 const char *page, size_t count)
1525{
1526 struct nvmet_subsys *subsys = to_subsys(item);
1527 bool pi_enable;
1528
1529 if (kstrtobool(s: page, res: &pi_enable))
1530 return -EINVAL;
1531
1532 subsys->pi_support = pi_enable;
1533 return count;
1534}
1535CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
1536#endif
1537
1538static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
1539 char *page)
1540{
1541 return snprintf(buf: page, PAGE_SIZE, fmt: "%u\n", to_subsys(item)->max_qid);
1542}
1543
1544static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
1545 const char *page, size_t cnt)
1546{
1547 struct nvmet_subsys *subsys = to_subsys(item);
1548 struct nvmet_ctrl *ctrl;
1549 u16 qid_max;
1550
1551 if (sscanf(page, "%hu\n", &qid_max) != 1)
1552 return -EINVAL;
1553
1554 if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
1555 return -EINVAL;
1556
1557 down_write(sem: &nvmet_config_sem);
1558 subsys->max_qid = qid_max;
1559
1560 /* Force reconnect */
1561 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1562 ctrl->ops->delete_ctrl(ctrl);
1563 up_write(sem: &nvmet_config_sem);
1564
1565 return cnt;
1566}
1567CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);
1568
/* Attribute files exposed in each subsystem's configfs directory. */
static struct configfs_attribute *nvmet_subsys_attrs[] = {
	&nvmet_subsys_attr_attr_allow_any_host,
	&nvmet_subsys_attr_attr_version,
	&nvmet_subsys_attr_attr_serial,
	&nvmet_subsys_attr_attr_cntlid_min,
	&nvmet_subsys_attr_attr_cntlid_max,
	&nvmet_subsys_attr_attr_model,
	&nvmet_subsys_attr_attr_qid_max,
	&nvmet_subsys_attr_attr_ieee_oui,
	&nvmet_subsys_attr_attr_firmware,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_subsys_attr_attr_pi_enable,
#endif
	NULL,
};
1584
/*
 * Subsystem structures & folder operation functions below
 */

/* configfs release: tear down all controllers and drop the final ref. */
static void nvmet_subsys_release(struct config_item *item)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	nvmet_subsys_del_ctrls(subsys);
	nvmet_subsys_put(subsys);
}
1595
/* Item ops for a subsystem directory: only release is needed. */
static struct configfs_item_operations nvmet_subsys_item_ops = {
	.release		= nvmet_subsys_release,
};

/* configfs item type for a single subsystem directory. */
static const struct config_item_type nvmet_subsys_type = {
	.ct_item_ops		= &nvmet_subsys_item_ops,
	.ct_attrs		= nvmet_subsys_attrs,
	.ct_owner		= THIS_MODULE,
};
1605
1606static struct config_group *nvmet_subsys_make(struct config_group *group,
1607 const char *name)
1608{
1609 struct nvmet_subsys *subsys;
1610
1611 if (sysfs_streq(s1: name, NVME_DISC_SUBSYS_NAME)) {
1612 pr_err("can't create discovery subsystem through configfs\n");
1613 return ERR_PTR(error: -EINVAL);
1614 }
1615
1616 if (sysfs_streq(s1: name, s2: nvmet_disc_subsys->subsysnqn)) {
1617 pr_err("can't create subsystem using unique discovery NQN\n");
1618 return ERR_PTR(error: -EINVAL);
1619 }
1620
1621 subsys = nvmet_subsys_alloc(subsysnqn: name, type: NVME_NQN_NVME);
1622 if (IS_ERR(ptr: subsys))
1623 return ERR_CAST(ptr: subsys);
1624
1625 config_group_init_type_name(group: &subsys->group, name, type: &nvmet_subsys_type);
1626
1627 config_group_init_type_name(group: &subsys->namespaces_group,
1628 name: "namespaces", type: &nvmet_namespaces_type);
1629 configfs_add_default_group(new_group: &subsys->namespaces_group, group: &subsys->group);
1630
1631 config_group_init_type_name(group: &subsys->allowed_hosts_group,
1632 name: "allowed_hosts", type: &nvmet_allowed_hosts_type);
1633 configfs_add_default_group(new_group: &subsys->allowed_hosts_group,
1634 group: &subsys->group);
1635
1636 nvmet_add_passthru_group(subsys);
1637
1638 return &subsys->group;
1639}
1640
/* Group ops for the top-level "subsystems" directory (mkdir support). */
static struct configfs_group_operations nvmet_subsystems_group_ops = {
	.make_group		= nvmet_subsys_make,
};

/* configfs item type for the "subsystems" directory. */
static const struct config_item_type nvmet_subsystems_type = {
	.ct_group_ops		= &nvmet_subsystems_group_ops,
	.ct_owner		= THIS_MODULE,
};
1649
1650static ssize_t nvmet_referral_enable_show(struct config_item *item,
1651 char *page)
1652{
1653 return snprintf(buf: page, PAGE_SIZE, fmt: "%d\n", to_nvmet_port(item)->enabled);
1654}
1655
1656static ssize_t nvmet_referral_enable_store(struct config_item *item,
1657 const char *page, size_t count)
1658{
1659 struct nvmet_port *parent = to_nvmet_port(item: item->ci_parent->ci_parent);
1660 struct nvmet_port *port = to_nvmet_port(item);
1661 bool enable;
1662
1663 if (kstrtobool(s: page, res: &enable))
1664 goto inval;
1665
1666 if (enable)
1667 nvmet_referral_enable(parent, port);
1668 else
1669 nvmet_referral_disable(parent, port);
1670
1671 return count;
1672inval:
1673 pr_err("Invalid value '%s' for enable\n", page);
1674 return -EINVAL;
1675}
1676
1677CONFIGFS_ATTR(nvmet_referral_, enable);
1678
/*
 * Discovery Service subsystem definitions
 */

/* Attribute files exposed for each discovery referral entry. */
static struct configfs_attribute *nvmet_referral_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_portid,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_referral_attr_enable,
	NULL,
};
1692
1693static void nvmet_referral_notify(struct config_group *group,
1694 struct config_item *item)
1695{
1696 struct nvmet_port *parent = to_nvmet_port(item: item->ci_parent->ci_parent);
1697 struct nvmet_port *port = to_nvmet_port(item);
1698
1699 nvmet_referral_disable(parent, port);
1700}
1701
/* Free the referral's port structure once its configfs item is released. */
static void nvmet_referral_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	kfree(port);
}
1708
/* Item ops for a referral entry: only release is needed. */
static struct configfs_item_operations nvmet_referral_item_ops = {
	.release	= nvmet_referral_release,
};

/* configfs item type for a single referral entry. */
static const struct config_item_type nvmet_referral_type = {
	.ct_owner	= THIS_MODULE,
	.ct_attrs	= nvmet_referral_attrs,
	.ct_item_ops	= &nvmet_referral_item_ops,
};
1718
1719static struct config_group *nvmet_referral_make(
1720 struct config_group *group, const char *name)
1721{
1722 struct nvmet_port *port;
1723
1724 port = kzalloc(size: sizeof(*port), GFP_KERNEL);
1725 if (!port)
1726 return ERR_PTR(error: -ENOMEM);
1727
1728 INIT_LIST_HEAD(list: &port->entry);
1729 config_group_init_type_name(group: &port->group, name, type: &nvmet_referral_type);
1730
1731 return &port->group;
1732}
1733
/* Group ops for the "referrals" directory: mkdir + pre-rmdir notify. */
static struct configfs_group_operations nvmet_referral_group_ops = {
	.make_group		= nvmet_referral_make,
	.disconnect_notify	= nvmet_referral_notify,
};

/* configfs item type for the "referrals" directory. */
static const struct config_item_type nvmet_referrals_type = {
	.ct_owner	= THIS_MODULE,
	.ct_group_ops	= &nvmet_referral_group_ops,
};
1743
/* Mapping between ANA state values and their configfs string names. */
static struct nvmet_type_name_map nvmet_ana_state[] = {
	{ NVME_ANA_OPTIMIZED,		"optimized" },
	{ NVME_ANA_NONOPTIMIZED,	"non-optimized" },
	{ NVME_ANA_INACCESSIBLE,	"inaccessible" },
	{ NVME_ANA_PERSISTENT_LOSS,	"persistent-loss" },
	{ NVME_ANA_CHANGE,		"change" },
};
1751
1752static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
1753 char *page)
1754{
1755 struct nvmet_ana_group *grp = to_ana_group(item);
1756 enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
1757 int i;
1758
1759 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1760 if (state == nvmet_ana_state[i].type)
1761 return sprintf(buf: page, fmt: "%s\n", nvmet_ana_state[i].name);
1762 }
1763
1764 return sprintf(buf: page, fmt: "\n");
1765}
1766
1767static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
1768 const char *page, size_t count)
1769{
1770 struct nvmet_ana_group *grp = to_ana_group(item);
1771 enum nvme_ana_state *ana_state = grp->port->ana_state;
1772 int i;
1773
1774 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1775 if (sysfs_streq(s1: page, s2: nvmet_ana_state[i].name))
1776 goto found;
1777 }
1778
1779 pr_err("Invalid value '%s' for ana_state\n", page);
1780 return -EINVAL;
1781
1782found:
1783 down_write(sem: &nvmet_ana_sem);
1784 ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
1785 nvmet_ana_chgcnt++;
1786 up_write(sem: &nvmet_ana_sem);
1787 nvmet_port_send_ana_event(port: grp->port);
1788 return count;
1789}
1790
1791CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
1792
/* Attribute files exposed for each ANA group directory. */
static struct configfs_attribute *nvmet_ana_group_attrs[] = {
	&nvmet_ana_group_attr_ana_state,
	NULL,
};
1797
1798static void nvmet_ana_group_release(struct config_item *item)
1799{
1800 struct nvmet_ana_group *grp = to_ana_group(item);
1801
1802 if (grp == &grp->port->ana_default_group)
1803 return;
1804
1805 down_write(sem: &nvmet_ana_sem);
1806 grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
1807 nvmet_ana_group_enabled[grp->grpid]--;
1808 up_write(sem: &nvmet_ana_sem);
1809
1810 nvmet_port_send_ana_event(port: grp->port);
1811 kfree(objp: grp);
1812}
1813
/* Item ops for an ANA group directory: only release is needed. */
static struct configfs_item_operations nvmet_ana_group_item_ops = {
	.release		= nvmet_ana_group_release,
};

/* configfs item type for a single ANA group directory. */
static const struct config_item_type nvmet_ana_group_type = {
	.ct_item_ops		= &nvmet_ana_group_item_ops,
	.ct_attrs		= nvmet_ana_group_attrs,
	.ct_owner		= THIS_MODULE,
};
1823
1824static struct config_group *nvmet_ana_groups_make_group(
1825 struct config_group *group, const char *name)
1826{
1827 struct nvmet_port *port = ana_groups_to_port(item: &group->cg_item);
1828 struct nvmet_ana_group *grp;
1829 u32 grpid;
1830 int ret;
1831
1832 ret = kstrtou32(s: name, base: 0, res: &grpid);
1833 if (ret)
1834 goto out;
1835
1836 ret = -EINVAL;
1837 if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
1838 goto out;
1839
1840 ret = -ENOMEM;
1841 grp = kzalloc(size: sizeof(*grp), GFP_KERNEL);
1842 if (!grp)
1843 goto out;
1844 grp->port = port;
1845 grp->grpid = grpid;
1846
1847 down_write(sem: &nvmet_ana_sem);
1848 grpid = array_index_nospec(grpid, NVMET_MAX_ANAGRPS);
1849 nvmet_ana_group_enabled[grpid]++;
1850 up_write(sem: &nvmet_ana_sem);
1851
1852 nvmet_port_send_ana_event(port: grp->port);
1853
1854 config_group_init_type_name(group: &grp->group, name, type: &nvmet_ana_group_type);
1855 return &grp->group;
1856out:
1857 return ERR_PTR(error: ret);
1858}
1859
/* Group ops for the "ana_groups" directory (mkdir support). */
static struct configfs_group_operations nvmet_ana_groups_group_ops = {
	.make_group		= nvmet_ana_groups_make_group,
};

/* configfs item type for the "ana_groups" directory. */
static const struct config_item_type nvmet_ana_groups_type = {
	.ct_group_ops		= &nvmet_ana_groups_group_ops,
	.ct_owner		= THIS_MODULE,
};
1868
1869/*
1870 * Ports definitions.
1871 */
1872static void nvmet_port_release(struct config_item *item)
1873{
1874 struct nvmet_port *port = to_nvmet_port(item);
1875
1876 /* Let inflight controllers teardown complete */
1877 flush_workqueue(nvmet_wq);
1878 list_del(entry: &port->global_entry);
1879
1880 key_put(key: port->keyring);
1881 kfree(objp: port->ana_state);
1882 kfree(objp: port);
1883}
1884
/* Attribute files exposed in each port's configfs directory. */
static struct configfs_attribute *nvmet_port_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_attr_addr_tsas,
	&nvmet_attr_param_inline_data_size,
	&nvmet_attr_param_max_queue_size,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_attr_param_pi_enable,
#endif
	NULL,
};
1899
/* Item ops for a port directory: only release is needed. */
static struct configfs_item_operations nvmet_port_item_ops = {
	.release		= nvmet_port_release,
};

/* configfs item type for a single port directory. */
static const struct config_item_type nvmet_port_type = {
	.ct_attrs		= nvmet_port_attrs,
	.ct_item_ops		= &nvmet_port_item_ops,
	.ct_owner		= THIS_MODULE,
};
1909
1910static struct config_group *nvmet_ports_make(struct config_group *group,
1911 const char *name)
1912{
1913 struct nvmet_port *port;
1914 u16 portid;
1915 u32 i;
1916
1917 if (kstrtou16(s: name, base: 0, res: &portid))
1918 return ERR_PTR(error: -EINVAL);
1919
1920 port = kzalloc(size: sizeof(*port), GFP_KERNEL);
1921 if (!port)
1922 return ERR_PTR(error: -ENOMEM);
1923
1924 port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
1925 size: sizeof(*port->ana_state), GFP_KERNEL);
1926 if (!port->ana_state) {
1927 kfree(objp: port);
1928 return ERR_PTR(error: -ENOMEM);
1929 }
1930
1931 if (IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS) && nvme_keyring_id()) {
1932 port->keyring = key_lookup(id: nvme_keyring_id());
1933 if (IS_ERR(ptr: port->keyring)) {
1934 pr_warn("NVMe keyring not available, disabling TLS\n");
1935 port->keyring = NULL;
1936 }
1937 }
1938
1939 for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
1940 if (i == NVMET_DEFAULT_ANA_GRPID)
1941 port->ana_state[1] = NVME_ANA_OPTIMIZED;
1942 else
1943 port->ana_state[i] = NVME_ANA_INACCESSIBLE;
1944 }
1945
1946 list_add(new: &port->global_entry, head: &nvmet_ports_list);
1947
1948 INIT_LIST_HEAD(list: &port->entry);
1949 INIT_LIST_HEAD(list: &port->subsystems);
1950 INIT_LIST_HEAD(list: &port->referrals);
1951 port->inline_data_size = -1; /* < 0 == let the transport choose */
1952 port->max_queue_size = -1; /* < 0 == let the transport choose */
1953
1954 port->disc_addr.portid = cpu_to_le16(portid);
1955 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
1956 port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
1957 config_group_init_type_name(group: &port->group, name, type: &nvmet_port_type);
1958
1959 config_group_init_type_name(group: &port->subsys_group,
1960 name: "subsystems", type: &nvmet_port_subsys_type);
1961 configfs_add_default_group(new_group: &port->subsys_group, group: &port->group);
1962
1963 config_group_init_type_name(group: &port->referrals_group,
1964 name: "referrals", type: &nvmet_referrals_type);
1965 configfs_add_default_group(new_group: &port->referrals_group, group: &port->group);
1966
1967 config_group_init_type_name(group: &port->ana_groups_group,
1968 name: "ana_groups", type: &nvmet_ana_groups_type);
1969 configfs_add_default_group(new_group: &port->ana_groups_group, group: &port->group);
1970
1971 port->ana_default_group.port = port;
1972 port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
1973 config_group_init_type_name(group: &port->ana_default_group.group,
1974 __stringify(NVMET_DEFAULT_ANA_GRPID),
1975 type: &nvmet_ana_group_type);
1976 configfs_add_default_group(new_group: &port->ana_default_group.group,
1977 group: &port->ana_groups_group);
1978
1979 return &port->group;
1980}
1981
/* Group ops for the top-level "ports" directory (mkdir support). */
static struct configfs_group_operations nvmet_ports_group_ops = {
	.make_group		= nvmet_ports_make,
};

/* configfs item type for the "ports" directory. */
static const struct config_item_type nvmet_ports_type = {
	.ct_group_ops		= &nvmet_ports_group_ops,
	.ct_owner		= THIS_MODULE,
};

/* Top-level default groups, registered in nvmet_init_configfs(). */
static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;
1993
1994#ifdef CONFIG_NVME_TARGET_AUTH
1995static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
1996 char *page)
1997{
1998 u8 *dhchap_secret = to_host(item)->dhchap_secret;
1999
2000 if (!dhchap_secret)
2001 return sprintf(buf: page, fmt: "\n");
2002 return sprintf(buf: page, fmt: "%s\n", dhchap_secret);
2003}
2004
2005static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
2006 const char *page, size_t count)
2007{
2008 struct nvmet_host *host = to_host(item);
2009 int ret;
2010
2011 ret = nvmet_auth_set_key(host, secret: page, set_ctrl: false);
2012 /*
2013 * Re-authentication is a soft state, so keep the
2014 * current authentication valid until the host
2015 * requests re-authentication.
2016 */
2017 return ret < 0 ? ret : count;
2018}
2019
2020CONFIGFS_ATTR(nvmet_host_, dhchap_key);
2021
2022static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
2023 char *page)
2024{
2025 u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
2026
2027 if (!dhchap_secret)
2028 return sprintf(buf: page, fmt: "\n");
2029 return sprintf(buf: page, fmt: "%s\n", dhchap_secret);
2030}
2031
2032static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
2033 const char *page, size_t count)
2034{
2035 struct nvmet_host *host = to_host(item);
2036 int ret;
2037
2038 ret = nvmet_auth_set_key(host, secret: page, set_ctrl: true);
2039 /*
2040 * Re-authentication is a soft state, so keep the
2041 * current authentication valid until the host
2042 * requests re-authentication.
2043 */
2044 return ret < 0 ? ret : count;
2045}
2046
2047CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);
2048
2049static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
2050 char *page)
2051{
2052 struct nvmet_host *host = to_host(item);
2053 const char *hash_name = nvme_auth_hmac_name(hmac_id: host->dhchap_hash_id);
2054
2055 return sprintf(buf: page, fmt: "%s\n", hash_name ? hash_name : "none");
2056}
2057
2058static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
2059 const char *page, size_t count)
2060{
2061 struct nvmet_host *host = to_host(item);
2062 u8 hmac_id;
2063
2064 hmac_id = nvme_auth_hmac_id(hmac_name: page);
2065 if (hmac_id == NVME_AUTH_HASH_INVALID)
2066 return -EINVAL;
2067 if (!crypto_has_shash(alg_name: nvme_auth_hmac_name(hmac_id), type: 0, mask: 0))
2068 return -ENOTSUPP;
2069 host->dhchap_hash_id = hmac_id;
2070 return count;
2071}
2072
2073CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
2074
2075static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
2076 char *page)
2077{
2078 struct nvmet_host *host = to_host(item);
2079 const char *dhgroup = nvme_auth_dhgroup_name(dhgroup_id: host->dhchap_dhgroup_id);
2080
2081 return sprintf(buf: page, fmt: "%s\n", dhgroup ? dhgroup : "none");
2082}
2083
2084static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
2085 const char *page, size_t count)
2086{
2087 struct nvmet_host *host = to_host(item);
2088 int dhgroup_id;
2089
2090 dhgroup_id = nvme_auth_dhgroup_id(dhgroup_name: page);
2091 if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
2092 return -EINVAL;
2093 if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
2094 const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
2095
2096 if (!crypto_has_kpp(alg_name: kpp, type: 0, mask: 0))
2097 return -EINVAL;
2098 }
2099 host->dhchap_dhgroup_id = dhgroup_id;
2100 return count;
2101}
2102
2103CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);
2104
/* Attribute files exposed in each host's configfs directory (auth only). */
static struct configfs_attribute *nvmet_host_attrs[] = {
	&nvmet_host_attr_dhchap_key,
	&nvmet_host_attr_dhchap_ctrl_key,
	&nvmet_host_attr_dhchap_hash,
	&nvmet_host_attr_dhchap_dhgroup,
	NULL,
};
2112#endif /* CONFIG_NVME_TARGET_AUTH */
2113
/* Free a host entry, including any DH-HMAC-CHAP secrets, on release. */
static void nvmet_host_release(struct config_item *item)
{
	struct nvmet_host *host = to_host(item);

#ifdef CONFIG_NVME_TARGET_AUTH
	kfree(host->dhchap_secret);
	kfree(host->dhchap_ctrl_secret);
#endif
	kfree(host);
}
2124
/* Item ops for a host directory: only release is needed. */
static struct configfs_item_operations nvmet_host_item_ops = {
	.release		= nvmet_host_release,
};

/* configfs item type for a single host directory. */
static const struct config_item_type nvmet_host_type = {
	.ct_item_ops		= &nvmet_host_item_ops,
#ifdef CONFIG_NVME_TARGET_AUTH
	.ct_attrs		= nvmet_host_attrs,
#endif
	.ct_owner		= THIS_MODULE,
};
2136
2137static struct config_group *nvmet_hosts_make_group(struct config_group *group,
2138 const char *name)
2139{
2140 struct nvmet_host *host;
2141
2142 host = kzalloc(size: sizeof(*host), GFP_KERNEL);
2143 if (!host)
2144 return ERR_PTR(error: -ENOMEM);
2145
2146#ifdef CONFIG_NVME_TARGET_AUTH
2147 /* Default to SHA256 */
2148 host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
2149#endif
2150
2151 config_group_init_type_name(group: &host->group, name, type: &nvmet_host_type);
2152
2153 return &host->group;
2154}
2155
/* Group ops for the top-level "hosts" directory (mkdir support). */
static struct configfs_group_operations nvmet_hosts_group_ops = {
	.make_group		= nvmet_hosts_make_group,
};

/* configfs item type for the "hosts" directory. */
static const struct config_item_type nvmet_hosts_type = {
	.ct_group_ops		= &nvmet_hosts_group_ops,
	.ct_owner		= THIS_MODULE,
};

/* Top-level "hosts" group, registered in nvmet_init_configfs(). */
static struct config_group nvmet_hosts_group;
2166
2167static ssize_t nvmet_root_discovery_nqn_show(struct config_item *item,
2168 char *page)
2169{
2170 return snprintf(buf: page, PAGE_SIZE, fmt: "%s\n", nvmet_disc_subsys->subsysnqn);
2171}
2172
2173static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item,
2174 const char *page, size_t count)
2175{
2176 struct list_head *entry;
2177 size_t len;
2178
2179 len = strcspn(page, "\n");
2180 if (!len || len > NVMF_NQN_FIELD_LEN - 1)
2181 return -EINVAL;
2182
2183 down_write(sem: &nvmet_config_sem);
2184 list_for_each(entry, &nvmet_subsystems_group.cg_children) {
2185 struct config_item *item =
2186 container_of(entry, struct config_item, ci_entry);
2187
2188 if (!strncmp(config_item_name(item), page, len)) {
2189 pr_err("duplicate NQN %s\n", config_item_name(item));
2190 up_write(sem: &nvmet_config_sem);
2191 return -EINVAL;
2192 }
2193 }
2194 memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN);
2195 memcpy(nvmet_disc_subsys->subsysnqn, page, len);
2196 up_write(sem: &nvmet_config_sem);
2197
2198 return len;
2199}
2200
2201CONFIGFS_ATTR(nvmet_root_, discovery_nqn);
2202
/* Attribute files exposed at the configfs root ("nvmet" directory). */
static struct configfs_attribute *nvmet_root_attrs[] = {
	&nvmet_root_attr_discovery_nqn,
	NULL,
};

/* configfs item type for the root "nvmet" directory. */
static const struct config_item_type nvmet_root_type = {
	.ct_attrs		= nvmet_root_attrs,
	.ct_owner		= THIS_MODULE,
};

/* The configfs subsystem rooted at /sys/kernel/config/nvmet. */
static struct configfs_subsystem nvmet_configfs_subsystem = {
	.su_group = {
		.cg_item = {
			.ci_namebuf	= "nvmet",
			.ci_type	= &nvmet_root_type,
		},
	},
};
2221
2222int __init nvmet_init_configfs(void)
2223{
2224 int ret;
2225
2226 config_group_init(group: &nvmet_configfs_subsystem.su_group);
2227 mutex_init(&nvmet_configfs_subsystem.su_mutex);
2228
2229 config_group_init_type_name(group: &nvmet_subsystems_group,
2230 name: "subsystems", type: &nvmet_subsystems_type);
2231 configfs_add_default_group(new_group: &nvmet_subsystems_group,
2232 group: &nvmet_configfs_subsystem.su_group);
2233
2234 config_group_init_type_name(group: &nvmet_ports_group,
2235 name: "ports", type: &nvmet_ports_type);
2236 configfs_add_default_group(new_group: &nvmet_ports_group,
2237 group: &nvmet_configfs_subsystem.su_group);
2238
2239 config_group_init_type_name(group: &nvmet_hosts_group,
2240 name: "hosts", type: &nvmet_hosts_type);
2241 configfs_add_default_group(new_group: &nvmet_hosts_group,
2242 group: &nvmet_configfs_subsystem.su_group);
2243
2244 ret = configfs_register_subsystem(subsys: &nvmet_configfs_subsystem);
2245 if (ret) {
2246 pr_err("configfs_register_subsystem: %d\n", ret);
2247 return ret;
2248 }
2249
2250 return 0;
2251}
2252
2253void __exit nvmet_exit_configfs(void)
2254{
2255 configfs_unregister_subsystem(subsys: &nvmet_configfs_subsystem);
2256}
2257

/* source: linux/drivers/nvme/target/configfs.c */