// SPDX-License-Identifier: GPL-2.0
/*
 * nested.c - nested mode translation support
 *
 * Copyright (C) 2023 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 *         Jacob Pan <jacob.jun.pan@linux.intel.com>
 *         Yi Liu <yi.l.liu@intel.com>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>

#include "iommu.h"
#include "pasid.h"

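/*
 * Attach @dev to a nested domain at the RID (IOMMU_NO_PASID) level:
 * validate that the stage-2 parent is compatible with the device's
 * IOMMU, install a nested PASID table entry, and link the device into
 * the domain's device list.
 */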
static int intel_nested_attach_dev(struct iommu_domain *domain,
				   struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu = info->iommu;
	unsigned long flags;
	int ret = 0;

	if (info->domain)
		device_block_translation(dev);

	if (iommu->agaw < dmar_domain->s2_domain->agaw) {
		dev_err_ratelimited(dev, "Adjusted guest address width not compatible\n");
		return -ENODEV;
	}

	/*
	 * A stage-1 domain cannot work alone; it is nested on a stage-2
	 * domain. The s2_domain will be used in nested translation, so
	 * we must ensure it is compatible with this IOMMU.
	 */
	ret = prepare_domain_attach_device(&dmar_domain->s2_domain->domain, dev);
	if (ret) {
		dev_err_ratelimited(dev, "s2 domain is not compatible\n");
		return ret;
	}

	ret = domain_attach_iommu(dmar_domain, iommu);
	if (ret) {
		dev_err_ratelimited(dev, "Failed to attach domain to iommu\n");
		return ret;
	}

	ret = intel_pasid_setup_nested(iommu, dev,
				       IOMMU_NO_PASID, dmar_domain);
	if (ret) {
		domain_detach_iommu(dmar_domain, iommu);
		dev_err_ratelimited(dev, "Failed to setup pasid entry\n");
		return ret;
	}

	info->domain = dmar_domain;
	spin_lock_irqsave(&dmar_domain->lock, flags);
	list_add(&info->link, &dmar_domain->devices);
	spin_unlock_irqrestore(&dmar_domain->lock, flags);

	domain_update_iotlb(dmar_domain);

	return 0;
}

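/*
 * Free a nested domain. The stage-2 parent is not owned by this domain;
 * only the s1_domains linkage on the parent is torn down here.
 */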
static void intel_nested_domain_free(struct iommu_domain *domain)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dmar_domain *s2_domain = dmar_domain->s2_domain;

	spin_lock(&s2_domain->s1_lock);
	list_del(&dmar_domain->s2_link);
	spin_unlock(&s2_domain->s1_lock);
	kfree(dmar_domain);
}

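/*
 * Invalidate the device TLB of every ATS-enabled device attached to
 * @domain. @mask is the address mask, i.e. log2 of the number of pages
 * covered by the invalidation.
 */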
static void nested_flush_dev_iotlb(struct dmar_domain *domain, u64 addr,
				   unsigned int mask)
{
	struct device_domain_info *info;
	unsigned long flags;
	u16 sid, qdep;

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(info, &domain->devices, link) {
		if (!info->ats_enabled)
			continue;
		/* Compose the PCI requester ID from bus and devfn */
		sid = info->bus << 8 | info->devfn;
		qdep = info->ats_qdep;
		qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
				   qdep, addr, mask);
		quirk_extra_dev_tlb_flush(info, addr, mask,
					  IOMMU_NO_PASID, qdep);
	}
	spin_unlock_irqrestore(&domain->lock, flags);
}

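/*
 * Flush the stage-1 IOTLB for @domain on every IOMMU it is attached to,
 * then propagate the invalidation to the device TLBs where ATS is in
 * use. @npages == U64_MAX requests a flush of the entire address space.
 */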
static void intel_nested_flush_cache(struct dmar_domain *domain, u64 addr,
				     u64 npages, bool ih)
{
	struct iommu_domain_info *info;
	unsigned int mask;
	unsigned long i;

	xa_for_each(&domain->iommu_array, i, info)
		qi_flush_piotlb(info->iommu,
				domain_id_iommu(domain, info->iommu),
				IOMMU_NO_PASID, addr, npages, ih);

	if (!domain->has_iotlb_device)
		return;

	/* Round the range up to a power-of-two page count for the dev-TLB */
	if (npages == U64_MAX)
		mask = 64 - VTD_PAGE_SHIFT;
	else
		mask = ilog2(__roundup_pow_of_two(npages));

	nested_flush_dev_iotlb(domain, addr, mask);
}

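/*
 * Handle an IOMMU_HWPT_INVALIDATE request from user space. Each array
 * entry is a struct iommu_hwpt_vtd_s1_invalidate; on return, entry_num
 * holds the number of entries that were actually processed, so user
 * space can tell how far a failed batch got.
 */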
static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
					      struct iommu_user_data_array *array)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct iommu_hwpt_vtd_s1_invalidate inv_entry;
	u32 index, processed = 0;
	int ret = 0;

	if (array->type != IOMMU_HWPT_INVALIDATE_DATA_VTD_S1) {
		ret = -EINVAL;
		goto out;
	}

	for (index = 0; index < array->entry_num; index++) {
		ret = iommu_copy_struct_from_user_array(&inv_entry, array,
							IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
							index, __reserved);
		if (ret)
			break;

		if ((inv_entry.flags & ~IOMMU_VTD_INV_FLAGS_LEAF) ||
		    inv_entry.__reserved) {
			ret = -EOPNOTSUPP;
			break;
		}

		if (!IS_ALIGNED(inv_entry.addr, VTD_PAGE_SIZE) ||
		    ((inv_entry.npages == U64_MAX) && inv_entry.addr)) {
			ret = -EINVAL;
			break;
		}

		intel_nested_flush_cache(dmar_domain, inv_entry.addr,
					 inv_entry.npages,
					 inv_entry.flags & IOMMU_VTD_INV_FLAGS_LEAF);
		processed++;
	}

out:
	array->entry_num = processed;
	return ret;
}

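/*
 * Domain ops for nested (stage-1) domains. There are no map/unmap ops:
 * the stage-1 page table is owned and maintained by user space, and the
 * kernel only relays invalidations for it.
 */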
static const struct iommu_domain_ops intel_nested_domain_ops = {
	.attach_dev		= intel_nested_attach_dev,
	.free			= intel_nested_domain_free,
	.cache_invalidate_user	= intel_nested_cache_invalidate_user,
};

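/*
 * Allocate a nested (stage-1) domain on top of the nested-parent
 * stage-2 domain @parent, taking the stage-1 page table configuration
 * from user space via @user_data.
 */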
struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
					       const struct iommu_user_data *user_data)
{
	struct dmar_domain *s2_domain = to_dmar_domain(parent);
	struct iommu_hwpt_vtd_s1 vtd;
	struct dmar_domain *domain;
	int ret;

	/* Must be nested domain */
	if (user_data->type != IOMMU_HWPT_DATA_VTD_S1)
		return ERR_PTR(-EOPNOTSUPP);
	if (parent->ops != intel_iommu_ops.default_domain_ops ||
	    !s2_domain->nested_parent)
		return ERR_PTR(-EINVAL);

	ret = iommu_copy_struct_from_user(&vtd, user_data,
					  IOMMU_HWPT_DATA_VTD_S1, __reserved);
	if (ret)
		return ERR_PTR(ret);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL_ACCOUNT);
	if (!domain)
		return ERR_PTR(-ENOMEM);

	domain->use_first_level = true;
	domain->s2_domain = s2_domain;
	domain->s1_pgtbl = vtd.pgtbl_addr;
	domain->s1_cfg = vtd;
	domain->domain.ops = &intel_nested_domain_ops;
	domain->domain.type = IOMMU_DOMAIN_NESTED;
	INIT_LIST_HEAD(&domain->devices);
	INIT_LIST_HEAD(&domain->dev_pasids);
	spin_lock_init(&domain->lock);
	xa_init(&domain->iommu_array);

	spin_lock(&s2_domain->s1_lock);
	list_add(&domain->s2_link, &s2_domain->s1_domains);
	spin_unlock(&s2_domain->s1_lock);

	return &domain->domain;
}