1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
3#include <linux/platform_device.h>
4#include <linux/module.h>
5#include <linux/device.h>
6#include <linux/kernel.h>
7#include <linux/acpi.h>
8#include <linux/pci.h>
9#include <linux/node.h>
10#include <asm/div64.h>
11#include "cxlpci.h"
12#include "cxl.h"
13
/* Per-root-decoder stash of CEDT CXIMS xormaps for XOR interleave math */
struct cxl_cxims_data {
	int nr_maps;	/* number of valid entries in @xormaps */
	u64 xormaps[] __counted_by(nr_maps);	/* XORMAP bitmaps copied from the CXIMS entry */
};

/* _DSM GUID for retrieving QoS Throttling Group (QTG) ids */
static const guid_t acpi_cxl_qtg_id_guid =
	GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
		  0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);
22
23
24static u64 cxl_xor_hpa_to_spa(struct cxl_root_decoder *cxlrd, u64 hpa)
25{
26 struct cxl_cxims_data *cximsd = cxlrd->platform_data;
27 int hbiw = cxlrd->cxlsd.nr_targets;
28 u64 val;
29 int pos;
30
31 /* No xormaps for host bridge interleave ways of 1 or 3 */
32 if (hbiw == 1 || hbiw == 3)
33 return hpa;
34
35 /*
36 * For root decoders using xormaps (hbiw: 2,4,6,8,12,16) restore
37 * the position bit to its value before the xormap was applied at
38 * HPA->DPA translation.
39 *
40 * pos is the lowest set bit in an XORMAP
41 * val is the XORALLBITS(HPA & XORMAP)
42 *
43 * XORALLBITS: The CXL spec (3.1 Table 9-22) defines XORALLBITS
44 * as an operation that outputs a single bit by XORing all the
45 * bits in the input (hpa & xormap). Implement XORALLBITS using
46 * hweight64(). If the hamming weight is even the XOR of those
47 * bits results in val==0, if odd the XOR result is val==1.
48 */
49
50 for (int i = 0; i < cximsd->nr_maps; i++) {
51 if (!cximsd->xormaps[i])
52 continue;
53 pos = __ffs(cximsd->xormaps[i]);
54 val = (hweight64(hpa & cximsd->xormaps[i]) & 1);
55 hpa = (hpa & ~(1ULL << pos)) | (val << pos);
56 }
57
58 return hpa;
59}
60
/* Carries the target root decoder through acpi_table_parse_cedt(CXIMS) */
struct cxl_cxims_context {
	struct device *dev;		/* ACPI0017 device, for logging and devm */
	struct cxl_root_decoder *cxlrd;	/* decoder to receive platform_data xormaps */
};
65
66static int cxl_parse_cxims(union acpi_subtable_headers *header, void *arg,
67 const unsigned long end)
68{
69 struct acpi_cedt_cxims *cxims = (struct acpi_cedt_cxims *)header;
70 struct cxl_cxims_context *ctx = arg;
71 struct cxl_root_decoder *cxlrd = ctx->cxlrd;
72 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
73 struct device *dev = ctx->dev;
74 struct cxl_cxims_data *cximsd;
75 unsigned int hbig, nr_maps;
76 int rc;
77
78 rc = eig_to_granularity(eig: cxims->hbig, granularity: &hbig);
79 if (rc)
80 return rc;
81
82 /* Does this CXIMS entry apply to the given CXL Window? */
83 if (hbig != cxld->interleave_granularity)
84 return 0;
85
86 /* IW 1,3 do not use xormaps and skip this parsing entirely */
87 if (is_power_of_2(n: cxld->interleave_ways))
88 /* 2, 4, 8, 16 way */
89 nr_maps = ilog2(cxld->interleave_ways);
90 else
91 /* 6, 12 way */
92 nr_maps = ilog2(cxld->interleave_ways / 3);
93
94 if (cxims->nr_xormaps < nr_maps) {
95 dev_dbg(dev, "CXIMS nr_xormaps[%d] expected[%d]\n",
96 cxims->nr_xormaps, nr_maps);
97 return -ENXIO;
98 }
99
100 cximsd = devm_kzalloc(dev, struct_size(cximsd, xormaps, nr_maps),
101 GFP_KERNEL);
102 if (!cximsd)
103 return -ENOMEM;
104 cximsd->nr_maps = nr_maps;
105 memcpy(cximsd->xormaps, cxims->xormap_list,
106 nr_maps * sizeof(*cximsd->xormaps));
107 cxlrd->platform_data = cximsd;
108
109 return 0;
110}
111
112static unsigned long cfmws_to_decoder_flags(int restrictions)
113{
114 unsigned long flags = CXL_DECODER_F_ENABLE;
115
116 if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
117 flags |= CXL_DECODER_F_TYPE2;
118 if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
119 flags |= CXL_DECODER_F_TYPE3;
120 if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
121 flags |= CXL_DECODER_F_RAM;
122 if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_PMEM)
123 flags |= CXL_DECODER_F_PMEM;
124 if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_FIXED)
125 flags |= CXL_DECODER_F_LOCK;
126
127 return flags;
128}
129
130static int cxl_acpi_cfmws_verify(struct device *dev,
131 struct acpi_cedt_cfmws *cfmws)
132{
133 int rc, expected_len;
134 unsigned int ways;
135
136 if (cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_MODULO &&
137 cfmws->interleave_arithmetic != ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
138 dev_err(dev, "CFMWS Unknown Interleave Arithmetic: %d\n",
139 cfmws->interleave_arithmetic);
140 return -EINVAL;
141 }
142
143 if (!IS_ALIGNED(cfmws->base_hpa, SZ_256M)) {
144 dev_err(dev, "CFMWS Base HPA not 256MB aligned\n");
145 return -EINVAL;
146 }
147
148 if (!IS_ALIGNED(cfmws->window_size, SZ_256M)) {
149 dev_err(dev, "CFMWS Window Size not 256MB aligned\n");
150 return -EINVAL;
151 }
152
153 rc = eiw_to_ways(eiw: cfmws->interleave_ways, ways: &ways);
154 if (rc) {
155 dev_err(dev, "CFMWS Interleave Ways (%d) invalid\n",
156 cfmws->interleave_ways);
157 return -EINVAL;
158 }
159
160 expected_len = struct_size(cfmws, interleave_targets, ways);
161
162 if (cfmws->header.length < expected_len) {
163 dev_err(dev, "CFMWS length %d less than expected %d\n",
164 cfmws->header.length, expected_len);
165 return -EINVAL;
166 }
167
168 if (cfmws->header.length > expected_len)
169 dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
170 cfmws->header.length, expected_len);
171
172 return 0;
173}
174
/*
 * Note, @dev must be the first member, see 'struct cxl_chbs_context'
 * and mock_acpi_table_parse_cedt()
 */
struct cxl_cfmws_context {
	struct device *dev;		/* ACPI0017 device, for logging and devm */
	struct cxl_port *root_port;	/* root decoders are registered here */
	struct resource *cxl_res;	/* private tree tracking CXL windows */
	int id;				/* next "CXL Window %d" resource id */
};
185
186/**
187 * cxl_acpi_evaluate_qtg_dsm - Retrieve QTG ids via ACPI _DSM
188 * @handle: ACPI handle
189 * @coord: performance access coordinates
190 * @entries: number of QTG IDs to return
191 * @qos_class: int array provided by caller to return QTG IDs
192 *
193 * Return: number of QTG IDs returned, or -errno for errors
194 *
195 * Issue QTG _DSM with accompanied bandwidth and latency data in order to get
196 * the QTG IDs that are suitable for the performance point in order of most
197 * suitable to least suitable. Write back array of QTG IDs and return the
198 * actual number of QTG IDs written back.
199 */
200static int
201cxl_acpi_evaluate_qtg_dsm(acpi_handle handle, struct access_coordinate *coord,
202 int entries, int *qos_class)
203{
204 union acpi_object *out_obj, *out_buf, *obj;
205 union acpi_object in_array[4] = {
206 [0].integer = { ACPI_TYPE_INTEGER, coord->read_latency },
207 [1].integer = { ACPI_TYPE_INTEGER, coord->write_latency },
208 [2].integer = { ACPI_TYPE_INTEGER, coord->read_bandwidth },
209 [3].integer = { ACPI_TYPE_INTEGER, coord->write_bandwidth },
210 };
211 union acpi_object in_obj = {
212 .package = {
213 .type = ACPI_TYPE_PACKAGE,
214 .count = 4,
215 .elements = in_array,
216 },
217 };
218 int count, pkg_entries, i;
219 u16 max_qtg;
220 int rc;
221
222 if (!entries)
223 return -EINVAL;
224
225 out_obj = acpi_evaluate_dsm(handle, guid: &acpi_cxl_qtg_id_guid, rev: 1, func: 1, argv4: &in_obj);
226 if (!out_obj)
227 return -ENXIO;
228
229 if (out_obj->type != ACPI_TYPE_PACKAGE) {
230 rc = -ENXIO;
231 goto out;
232 }
233
234 /* Check Max QTG ID */
235 obj = &out_obj->package.elements[0];
236 if (obj->type != ACPI_TYPE_INTEGER) {
237 rc = -ENXIO;
238 goto out;
239 }
240
241 max_qtg = obj->integer.value;
242
243 /* It's legal to have 0 QTG entries */
244 pkg_entries = out_obj->package.count;
245 if (pkg_entries <= 1) {
246 rc = 0;
247 goto out;
248 }
249
250 /* Retrieve QTG IDs package */
251 obj = &out_obj->package.elements[1];
252 if (obj->type != ACPI_TYPE_PACKAGE) {
253 rc = -ENXIO;
254 goto out;
255 }
256
257 pkg_entries = obj->package.count;
258 count = min(entries, pkg_entries);
259 for (i = 0; i < count; i++) {
260 u16 qtg_id;
261
262 out_buf = &obj->package.elements[i];
263 if (out_buf->type != ACPI_TYPE_INTEGER) {
264 rc = -ENXIO;
265 goto out;
266 }
267
268 qtg_id = out_buf->integer.value;
269 if (qtg_id > max_qtg)
270 pr_warn("QTG ID %u greater than MAX %u\n",
271 qtg_id, max_qtg);
272
273 qos_class[i] = qtg_id;
274 }
275 rc = count;
276
277out:
278 ACPI_FREE(out_obj);
279 return rc;
280}
281
282static int cxl_acpi_qos_class(struct cxl_root *cxl_root,
283 struct access_coordinate *coord, int entries,
284 int *qos_class)
285{
286 struct device *dev = cxl_root->port.uport_dev;
287 acpi_handle handle;
288
289 if (!dev_is_platform(dev))
290 return -ENODEV;
291
292 handle = ACPI_HANDLE(dev);
293 if (!handle)
294 return -ENODEV;
295
296 return cxl_acpi_evaluate_qtg_dsm(handle, coord, entries, qos_class);
297}
298
/* Root-port operations: QTG id retrieval via the ACPI _DSM */
static const struct cxl_root_ops acpi_root_ops = {
	.qos_class = cxl_acpi_qos_class,
};
302
303static void del_cxl_resource(struct resource *res)
304{
305 if (!res)
306 return;
307 kfree(objp: res->name);
308 kfree(objp: res);
309}
310
311static struct resource *alloc_cxl_resource(resource_size_t base,
312 resource_size_t n, int id)
313{
314 struct resource *res __free(kfree) = kzalloc(sizeof(*res), GFP_KERNEL);
315
316 if (!res)
317 return NULL;
318
319 res->start = base;
320 res->end = base + n - 1;
321 res->flags = IORESOURCE_MEM;
322 res->name = kasprintf(GFP_KERNEL, fmt: "CXL Window %d", id);
323 if (!res->name)
324 return NULL;
325
326 return no_free_ptr(res);
327}
328
/*
 * Insert @res into the private @parent tree; on failure the resource is
 * freed so the caller never owns it after this call.
 *
 * Fix: removed editor inlay-hint artifact ("new:") that rendered the
 * insert_resource() call invalid C.
 */
static int add_or_reset_cxl_resource(struct resource *parent, struct resource *res)
{
	int rc = insert_resource(parent, res);

	if (rc)
		del_cxl_resource(res);
	return rc;
}
337
338DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
339 if (!IS_ERR_OR_NULL(_T)) put_device(&_T->cxlsd.cxld.dev))
340DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
341static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
342 struct cxl_cfmws_context *ctx)
343{
344 int target_map[CXL_DECODER_MAX_INTERLEAVE];
345 struct cxl_port *root_port = ctx->root_port;
346 struct cxl_cxims_context cxims_ctx;
347 struct device *dev = ctx->dev;
348 struct cxl_decoder *cxld;
349 unsigned int ways, i, ig;
350 int rc;
351
352 rc = cxl_acpi_cfmws_verify(dev, cfmws);
353 if (rc)
354 return rc;
355
356 rc = eiw_to_ways(eiw: cfmws->interleave_ways, ways: &ways);
357 if (rc)
358 return rc;
359 rc = eig_to_granularity(eig: cfmws->granularity, granularity: &ig);
360 if (rc)
361 return rc;
362 for (i = 0; i < ways; i++)
363 target_map[i] = cfmws->interleave_targets[i];
364
365 struct resource *res __free(del_cxl_resource) = alloc_cxl_resource(
366 base: cfmws->base_hpa, n: cfmws->window_size, id: ctx->id++);
367 if (!res)
368 return -ENOMEM;
369
370 /* add to the local resource tracking to establish a sort order */
371 rc = add_or_reset_cxl_resource(parent: ctx->cxl_res, no_free_ptr(res));
372 if (rc)
373 return rc;
374
375 struct cxl_root_decoder *cxlrd __free(put_cxlrd) =
376 cxl_root_decoder_alloc(port: root_port, nr_targets: ways);
377
378 if (IS_ERR(ptr: cxlrd))
379 return PTR_ERR(ptr: cxlrd);
380
381 cxld = &cxlrd->cxlsd.cxld;
382 cxld->flags = cfmws_to_decoder_flags(restrictions: cfmws->restrictions);
383 cxld->target_type = CXL_DECODER_HOSTONLYMEM;
384 cxld->hpa_range = (struct range) {
385 .start = cfmws->base_hpa,
386 .end = cfmws->base_hpa + cfmws->window_size - 1,
387 };
388 cxld->interleave_ways = ways;
389 /*
390 * Minimize the x1 granularity to advertise support for any
391 * valid region granularity
392 */
393 if (ways == 1)
394 ig = CXL_DECODER_MIN_GRANULARITY;
395 cxld->interleave_granularity = ig;
396
397 if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
398 if (ways != 1 && ways != 3) {
399 cxims_ctx = (struct cxl_cxims_context) {
400 .dev = dev,
401 .cxlrd = cxlrd,
402 };
403 rc = acpi_table_parse_cedt(id: ACPI_CEDT_TYPE_CXIMS,
404 handler_arg: cxl_parse_cxims, arg: &cxims_ctx);
405 if (rc < 0)
406 return rc;
407 if (!cxlrd->platform_data) {
408 dev_err(dev, "No CXIMS for HBIG %u\n", ig);
409 return -EINVAL;
410 }
411 }
412 }
413
414 cxlrd->qos_class = cfmws->qtg_id;
415
416 if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR)
417 cxlrd->hpa_to_spa = cxl_xor_hpa_to_spa;
418
419 rc = cxl_decoder_add(cxld, target_map);
420 if (rc)
421 return rc;
422
423 rc = cxl_root_decoder_autoremove(host: dev, no_free_ptr(cxlrd));
424 if (rc)
425 return rc;
426
427 dev_dbg(root_port->dev.parent, "%s added to %s\n",
428 dev_name(&cxld->dev), dev_name(&root_port->dev));
429
430 return 0;
431}
432
433static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
434 const unsigned long end)
435{
436 struct acpi_cedt_cfmws *cfmws = (struct acpi_cedt_cfmws *)header;
437 struct cxl_cfmws_context *ctx = arg;
438 struct device *dev = ctx->dev;
439 int rc;
440
441 rc = __cxl_parse_cfmws(cfmws, ctx);
442 if (rc)
443 dev_err(dev,
444 "Failed to add decode range: [%#llx - %#llx] (%d)\n",
445 cfmws->base_hpa,
446 cfmws->base_hpa + cfmws->window_size - 1, rc);
447 else
448 dev_dbg(dev, "decode range: node: %d range [%#llx - %#llx]\n",
449 phys_to_target_node(cfmws->base_hpa), cfmws->base_hpa,
450 cfmws->base_hpa + cfmws->window_size - 1);
451
452 /* never fail cxl_acpi load for a single window failure */
453 return 0;
454}
455
456__mock struct acpi_device *to_cxl_host_bridge(struct device *host,
457 struct device *dev)
458{
459 struct acpi_device *adev = to_acpi_device(dev);
460
461 if (!acpi_pci_find_root(handle: adev->handle))
462 return NULL;
463
464 if (strcmp(acpi_device_hid(device: adev), "ACPI0016") == 0)
465 return adev;
466 return NULL;
467}
468
/* Note, @dev is used by mock_acpi_table_parse_cedt() */
struct cxl_chbs_context {
	struct device *dev;
	unsigned long long uid;	/* _UID of the host bridge being matched */
	resource_size_t base;	/* CHBS register base, CXL_RESOURCE_NONE until found */
	u32 cxl_version;	/* version of the matching CHBS entry, UINT_MAX until found */
	int nr_versions;	/* number of distinct CXL versions seen across entries */
	u32 saved_version;	/* last version seen, drives nr_versions counting */
};
478
479static int cxl_get_chbs_iter(union acpi_subtable_headers *header, void *arg,
480 const unsigned long end)
481{
482 struct cxl_chbs_context *ctx = arg;
483 struct acpi_cedt_chbs *chbs;
484
485 chbs = (struct acpi_cedt_chbs *) header;
486
487 if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11 &&
488 chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL11)
489 return 0;
490
491 if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20 &&
492 chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL20)
493 return 0;
494
495 if (!chbs->base)
496 return 0;
497
498 if (ctx->saved_version != chbs->cxl_version) {
499 /*
500 * cxl_version cannot be overwritten before the next two
501 * checks, then use saved_version
502 */
503 ctx->saved_version = chbs->cxl_version;
504 ctx->nr_versions++;
505 }
506
507 if (ctx->base != CXL_RESOURCE_NONE)
508 return 0;
509
510 if (ctx->uid != chbs->uid)
511 return 0;
512
513 ctx->cxl_version = chbs->cxl_version;
514 ctx->base = chbs->base;
515
516 return 0;
517}
518
519static int cxl_get_chbs(struct device *dev, struct acpi_device *hb,
520 struct cxl_chbs_context *ctx)
521{
522 unsigned long long uid;
523 int rc;
524
525 rc = acpi_evaluate_integer(handle: hb->handle, METHOD_NAME__UID, NULL, data: &uid);
526 if (rc != AE_OK) {
527 dev_err(dev, "unable to retrieve _UID\n");
528 return -ENOENT;
529 }
530
531 dev_dbg(dev, "UID found: %lld\n", uid);
532 *ctx = (struct cxl_chbs_context) {
533 .dev = dev,
534 .uid = uid,
535 .base = CXL_RESOURCE_NONE,
536 .cxl_version = UINT_MAX,
537 .saved_version = UINT_MAX,
538 };
539
540 acpi_table_parse_cedt(id: ACPI_CEDT_TYPE_CHBS, handler_arg: cxl_get_chbs_iter, arg: ctx);
541
542 if (ctx->nr_versions > 1) {
543 /*
544 * Disclaim eRCD support given some component register may
545 * only be found via CHBCR
546 */
547 dev_info(dev, "Unsupported platform config, mixed Virtual Host and Restricted CXL Host hierarchy.");
548 }
549
550 return 0;
551}
552
553static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
554{
555 struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
556 u32 uid;
557
558 if (kstrtou32(acpi_device_uid(hb), base: 0, res: &uid))
559 return -EINVAL;
560
561 return acpi_get_genport_coordinates(uid, coord: dport->coord);
562}
563
564static int add_host_bridge_dport(struct device *match, void *arg)
565{
566 int ret;
567 acpi_status rc;
568 struct device *bridge;
569 struct cxl_dport *dport;
570 struct cxl_chbs_context ctx;
571 struct acpi_pci_root *pci_root;
572 struct cxl_port *root_port = arg;
573 struct device *host = root_port->dev.parent;
574 struct acpi_device *hb = to_cxl_host_bridge(host, dev: match);
575
576 if (!hb)
577 return 0;
578
579 rc = cxl_get_chbs(dev: match, hb, ctx: &ctx);
580 if (rc)
581 return rc;
582
583 if (ctx.cxl_version == UINT_MAX) {
584 dev_warn(match, "No CHBS found for Host Bridge (UID %lld)\n",
585 ctx.uid);
586 return 0;
587 }
588
589 if (ctx.base == CXL_RESOURCE_NONE) {
590 dev_warn(match, "CHBS invalid for Host Bridge (UID %lld)\n",
591 ctx.uid);
592 return 0;
593 }
594
595 pci_root = acpi_pci_find_root(handle: hb->handle);
596 bridge = pci_root->bus->bridge;
597
598 /*
599 * In RCH mode, bind the component regs base to the dport. In
600 * VH mode it will be bound to the CXL host bridge's port
601 * object later in add_host_bridge_uport().
602 */
603 if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
604 dev_dbg(match, "RCRB found for UID %lld: %pa\n", ctx.uid,
605 &ctx.base);
606 dport = devm_cxl_add_rch_dport(port: root_port, dport_dev: bridge, port_id: ctx.uid,
607 rcrb: ctx.base);
608 } else {
609 dport = devm_cxl_add_dport(port: root_port, dport: bridge, port_id: ctx.uid,
610 CXL_RESOURCE_NONE);
611 }
612
613 if (IS_ERR(ptr: dport))
614 return PTR_ERR(ptr: dport);
615
616 ret = get_genport_coordinates(dev: match, dport);
617 if (ret)
618 dev_dbg(match, "Failed to get generic port perf coordinates.\n");
619
620 return 0;
621}
622
623/*
624 * A host bridge is a dport to a CFMWS decode and it is a uport to the
625 * dport (PCIe Root Ports) in the host bridge.
626 */
627static int add_host_bridge_uport(struct device *match, void *arg)
628{
629 struct cxl_port *root_port = arg;
630 struct device *host = root_port->dev.parent;
631 struct acpi_device *hb = to_cxl_host_bridge(host, dev: match);
632 struct acpi_pci_root *pci_root;
633 struct cxl_dport *dport;
634 struct cxl_port *port;
635 struct device *bridge;
636 struct cxl_chbs_context ctx;
637 resource_size_t component_reg_phys;
638 int rc;
639
640 if (!hb)
641 return 0;
642
643 pci_root = acpi_pci_find_root(handle: hb->handle);
644 bridge = pci_root->bus->bridge;
645 dport = cxl_find_dport_by_dev(port: root_port, dport_dev: bridge);
646 if (!dport) {
647 dev_dbg(host, "host bridge expected and not found\n");
648 return 0;
649 }
650
651 if (dport->rch) {
652 dev_info(bridge, "host supports CXL (restricted)\n");
653 return 0;
654 }
655
656 rc = cxl_get_chbs(dev: match, hb, ctx: &ctx);
657 if (rc)
658 return rc;
659
660 if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
661 dev_warn(bridge,
662 "CXL CHBS version mismatch, skip port registration\n");
663 return 0;
664 }
665
666 component_reg_phys = ctx.base;
667 if (component_reg_phys != CXL_RESOURCE_NONE)
668 dev_dbg(match, "CHBCR found for UID %lld: %pa\n",
669 ctx.uid, &component_reg_phys);
670
671 rc = devm_cxl_register_pci_bus(host, uport_dev: bridge, bus: pci_root->bus);
672 if (rc)
673 return rc;
674
675 port = devm_cxl_add_port(host, uport_dev: bridge, component_reg_phys, parent_dport: dport);
676 if (IS_ERR(ptr: port))
677 return PTR_ERR(ptr: port);
678
679 dev_info(bridge, "host supports CXL\n");
680
681 return 0;
682}
683
684static int add_root_nvdimm_bridge(struct device *match, void *data)
685{
686 struct cxl_decoder *cxld;
687 struct cxl_port *root_port = data;
688 struct cxl_nvdimm_bridge *cxl_nvb;
689 struct device *host = root_port->dev.parent;
690
691 if (!is_root_decoder(dev: match))
692 return 0;
693
694 cxld = to_cxl_decoder(dev: match);
695 if (!(cxld->flags & CXL_DECODER_F_PMEM))
696 return 0;
697
698 cxl_nvb = devm_cxl_add_nvdimm_bridge(host, port: root_port);
699 if (IS_ERR(ptr: cxl_nvb)) {
700 dev_dbg(host, "failed to register pmem\n");
701 return PTR_ERR(ptr: cxl_nvb);
702 }
703 dev_dbg(host, "%s: add: %s\n", dev_name(&root_port->dev),
704 dev_name(&cxl_nvb->dev));
705 return 1;
706}
707
/* Dedicated lockdep class for the ACPI0017 root device lock */
static struct lock_class_key cxl_root_key;

/* devm action: undo device_lock_set_class() when the driver unbinds */
static void cxl_acpi_lock_reset_class(void *dev)
{
	device_lock_reset_class(dev);
}
714
/*
 * Pair a private CXL window tracking resource with its "public" copy in
 * iomem_resource, reusing the private resource's @desc field as storage.
 */
static void cxl_set_public_resource(struct resource *priv, struct resource *pub)
{
	priv->desc = (unsigned long) pub;
}

/* Retrieve the iomem_resource copy stashed by cxl_set_public_resource() */
static struct resource *cxl_get_public_resource(struct resource *priv)
{
	return (struct resource *) priv->desc;
}
724
725static void remove_cxl_resources(void *data)
726{
727 struct resource *res, *next, *cxl = data;
728
729 for (res = cxl->child; res; res = next) {
730 struct resource *victim = cxl_get_public_resource(priv: res);
731
732 next = res->sibling;
733 remove_resource(old: res);
734
735 if (victim) {
736 remove_resource(old: victim);
737 kfree(objp: victim);
738 }
739
740 del_cxl_resource(res);
741 }
742}
743
744/**
745 * add_cxl_resources() - reflect CXL fixed memory windows in iomem_resource
746 * @cxl_res: A standalone resource tree where each CXL window is a sibling
747 *
748 * Walk each CXL window in @cxl_res and add it to iomem_resource potentially
749 * expanding its boundaries to ensure that any conflicting resources become
750 * children. If a window is expanded it may then conflict with a another window
751 * entry and require the window to be truncated or trimmed. Consider this
752 * situation::
753 *
754 * |-- "CXL Window 0" --||----- "CXL Window 1" -----|
755 * |--------------- "System RAM" -------------|
756 *
757 * ...where platform firmware has established as System RAM resource across 2
758 * windows, but has left some portion of window 1 for dynamic CXL region
759 * provisioning. In this case "Window 0" will span the entirety of the "System
760 * RAM" span, and "CXL Window 1" is truncated to the remaining tail past the end
761 * of that "System RAM" resource.
762 */
763static int add_cxl_resources(struct resource *cxl_res)
764{
765 struct resource *res, *new, *next;
766
767 for (res = cxl_res->child; res; res = next) {
768 new = kzalloc(sizeof(*new), GFP_KERNEL);
769 if (!new)
770 return -ENOMEM;
771 new->name = res->name;
772 new->start = res->start;
773 new->end = res->end;
774 new->flags = IORESOURCE_MEM;
775 new->desc = IORES_DESC_CXL;
776
777 /*
778 * Record the public resource in the private cxl_res tree for
779 * later removal.
780 */
781 cxl_set_public_resource(priv: res, pub: new);
782
783 insert_resource_expand_to_fit(root: &iomem_resource, new);
784
785 next = res->sibling;
786 while (next && resource_overlaps(r1: new, r2: next)) {
787 if (resource_contains(r1: new, r2: next)) {
788 struct resource *_next = next->sibling;
789
790 remove_resource(old: next);
791 del_cxl_resource(res: next);
792 next = _next;
793 } else
794 next->start = new->end + 1;
795 }
796 }
797 return 0;
798}
799
800static int pair_cxl_resource(struct device *dev, void *data)
801{
802 struct resource *cxl_res = data;
803 struct resource *p;
804
805 if (!is_root_decoder(dev))
806 return 0;
807
808 for (p = cxl_res->child; p; p = p->sibling) {
809 struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
810 struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
811 struct resource res = {
812 .start = cxld->hpa_range.start,
813 .end = cxld->hpa_range.end,
814 .flags = IORESOURCE_MEM,
815 };
816
817 if (resource_contains(r1: p, r2: &res)) {
818 cxlrd->res = cxl_get_public_resource(priv: p);
819 break;
820 }
821 }
822
823 return 0;
824}
825
826static int cxl_acpi_probe(struct platform_device *pdev)
827{
828 int rc;
829 struct resource *cxl_res;
830 struct cxl_root *cxl_root;
831 struct cxl_port *root_port;
832 struct device *host = &pdev->dev;
833 struct acpi_device *adev = ACPI_COMPANION(host);
834 struct cxl_cfmws_context ctx;
835
836 device_lock_set_class(&pdev->dev, &cxl_root_key);
837 rc = devm_add_action_or_reset(&pdev->dev, cxl_acpi_lock_reset_class,
838 &pdev->dev);
839 if (rc)
840 return rc;
841
842 cxl_res = devm_kzalloc(dev: host, size: sizeof(*cxl_res), GFP_KERNEL);
843 if (!cxl_res)
844 return -ENOMEM;
845 cxl_res->name = "CXL mem";
846 cxl_res->start = 0;
847 cxl_res->end = -1;
848 cxl_res->flags = IORESOURCE_MEM;
849
850 cxl_root = devm_cxl_add_root(host, ops: &acpi_root_ops);
851 if (IS_ERR(ptr: cxl_root))
852 return PTR_ERR(ptr: cxl_root);
853 root_port = &cxl_root->port;
854
855 rc = bus_for_each_dev(bus: adev->dev.bus, NULL, data: root_port,
856 fn: add_host_bridge_dport);
857 if (rc < 0)
858 return rc;
859
860 rc = devm_add_action_or_reset(host, remove_cxl_resources, cxl_res);
861 if (rc)
862 return rc;
863
864 ctx = (struct cxl_cfmws_context) {
865 .dev = host,
866 .root_port = root_port,
867 .cxl_res = cxl_res,
868 };
869 rc = acpi_table_parse_cedt(id: ACPI_CEDT_TYPE_CFMWS, handler_arg: cxl_parse_cfmws, arg: &ctx);
870 if (rc < 0)
871 return -ENXIO;
872
873 rc = add_cxl_resources(cxl_res);
874 if (rc)
875 return rc;
876
877 /*
878 * Populate the root decoders with their related iomem resource,
879 * if present
880 */
881 device_for_each_child(parent: &root_port->dev, data: cxl_res, fn: pair_cxl_resource);
882
883 /*
884 * Root level scanned with host-bridge as dports, now scan host-bridges
885 * for their role as CXL uports to their CXL-capable PCIe Root Ports.
886 */
887 rc = bus_for_each_dev(bus: adev->dev.bus, NULL, data: root_port,
888 fn: add_host_bridge_uport);
889 if (rc < 0)
890 return rc;
891
892 if (IS_ENABLED(CONFIG_CXL_PMEM))
893 rc = device_for_each_child(parent: &root_port->dev, data: root_port,
894 fn: add_root_nvdimm_bridge);
895 if (rc < 0)
896 return rc;
897
898 /* In case PCI is scanned before ACPI re-trigger memdev attach */
899 cxl_bus_rescan();
900 return 0;
901}
902
/* ACPI match: the CXL root object that owns the CEDT */
static const struct acpi_device_id cxl_acpi_ids[] = {
	{ "ACPI0017" },
	{ },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);

/* Platform-bus match by name, e.g. for mocked topologies (see __mock) */
static const struct platform_device_id cxl_test_ids[] = {
	{ "cxl_acpi" },
	{ },
};
MODULE_DEVICE_TABLE(platform, cxl_test_ids);

static struct platform_driver cxl_acpi_driver = {
	.probe = cxl_acpi_probe,
	.driver = {
		.name = KBUILD_MODNAME,
		.acpi_match_table = cxl_acpi_ids,
	},
	.id_table = cxl_test_ids,
};
923
/* Module init: register the driver (runs at subsys_initcall, see below) */
static int __init cxl_acpi_init(void)
{
	return platform_driver_register(&cxl_acpi_driver);
}
928
/* Module exit: unregister, then drain pending cxl_bus work (cxl core) */
static void __exit cxl_acpi_exit(void)
{
	platform_driver_unregister(&cxl_acpi_driver);
	cxl_bus_drain();
}
934
/* load before dax_hmem sees 'Soft Reserved' CXL ranges */
subsys_initcall(cxl_acpi_init);

/*
 * Arrange for host-bridge ports to be active synchronous with
 * cxl_acpi_probe() exit.
 */
MODULE_SOFTDEP("pre: cxl_port");

module_exit(cxl_acpi_exit);
MODULE_DESCRIPTION("CXL ACPI: Platform Support");
MODULE_LICENSE("GPL v2");
/* symbol namespaces exported by the cxl core and the ACPI subsystem */
MODULE_IMPORT_NS("CXL");
MODULE_IMPORT_NS("ACPI");
949
/* source: linux/drivers/cxl/acpi.c */