// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Platform Monitoring Technology Telemetry driver
 *
 * Copyright (c) 2020, Intel Corporation.
 * All Rights Reserved.
 *
 * Author: "Alexander Duyck" <alexander.h.duyck@linux.intel.com>
 */

#include <linux/kernel.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>

#include "../vsec.h"
#include "class.h"

#define PMT_XA_START		0
#define PMT_XA_MAX		INT_MAX
#define PMT_XA_LIMIT		XA_LIMIT(PMT_XA_START, PMT_XA_MAX)
#define GUID_SPR_PUNIT		0x9956f43f

bool intel_pmt_is_early_client_hw(struct device *dev)
{
	struct intel_vsec_device *ivdev = dev_to_ivdev(dev);

	/*
	 * Early implementations of PMT on client platforms have some
	 * differences from the server platforms (which use the Out Of Band
	 * Management Services Module OOBMSM).
	 */
	return !!(ivdev->info->quirks & VSEC_QUIRK_EARLY_HW);
}
EXPORT_SYMBOL_GPL(intel_pmt_is_early_client_hw);

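/*
 * The SPR PUNIT telemetry region only tolerates aligned 64-bit reads, so
 * memcpy_fromio(), which may issue narrower accesses, cannot be used.
 * This helper reads whole qwords with readq() and copies any trailing
 * bytes out of one final qword read: a 20-byte request, for example, is
 * served as two readq()s plus a third from which only 4 bytes are kept.
 */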
static inline int
pmt_memcpy64_fromio(void *to, const u64 __iomem *from, size_t count)
{
	int i, remain;
	u64 *buf = to;

	if (!IS_ALIGNED((unsigned long)from, 8))
		return -EFAULT;

	for (i = 0; i < count/8; i++)
		buf[i] = readq(&from[i]);

	/* Copy any remaining bytes */
	remain = count % 8;
	if (remain) {
		u64 tmp = readq(&from[i]);

		memcpy(&buf[i], &tmp, remain);
	}

	return count;
}

/*
 * sysfs
 */
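/*
 * Read handler for the per-endpoint telemetry binary attribute. Standard
 * sysfs bin_attribute semantics apply: a read at or past the end of the
 * region returns 0 (EOF), and a read that crosses the end is clamped to
 * the bytes remaining (e.g. 16 bytes requested at offset size - 6 yields
 * at most 6).
 */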
static ssize_t
intel_pmt_read(struct file *filp, struct kobject *kobj,
	       struct bin_attribute *attr, char *buf, loff_t off,
	       size_t count)
{
	struct intel_pmt_entry *entry = container_of(attr,
						     struct intel_pmt_entry,
						     pmt_bin_attr);

	if (off < 0)
		return -EINVAL;

	if (off >= entry->size)
		return 0;

	if (count > entry->size - off)
		count = entry->size - off;

	if (entry->guid == GUID_SPR_PUNIT)
		/* PUNIT on SPR only supports aligned 64-bit read */
		count = pmt_memcpy64_fromio(buf, entry->base + off, count);
	else
		memcpy_fromio(buf, entry->base + off, count);

	return count;
}

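/*
 * mmap handler for the telemetry region. Mappings are read-only and
 * uncached; because the region need not start or end on a page boundary,
 * psize below spans from the page containing base_addr through the page
 * containing the last byte, which is the most userspace may map.
 */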
static int
intel_pmt_mmap(struct file *filp, struct kobject *kobj,
	       struct bin_attribute *attr, struct vm_area_struct *vma)
{
	struct intel_pmt_entry *entry = container_of(attr,
						     struct intel_pmt_entry,
						     pmt_bin_attr);
	unsigned long vsize = vma->vm_end - vma->vm_start;
	struct device *dev = kobj_to_dev(kobj);
	unsigned long phys = entry->base_addr;
	unsigned long pfn = PFN_DOWN(phys);
	unsigned long psize;

	if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
		return -EROFS;

	psize = (PFN_UP(entry->base_addr + entry->size) - pfn) * PAGE_SIZE;
	if (vsize > psize) {
		dev_err(dev, "Requested mmap size is too large\n");
		return -EINVAL;
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (io_remap_pfn_range(vma, vma->vm_start, pfn,
			       vsize, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

static ssize_t
guid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct intel_pmt_entry *entry = dev_get_drvdata(dev);

	return sprintf(buf, "0x%x\n", entry->guid);
}
static DEVICE_ATTR_RO(guid);

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct intel_pmt_entry *entry = dev_get_drvdata(dev);

	return sprintf(buf, "%zu\n", entry->size);
}
static DEVICE_ATTR_RO(size);

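/*
 * The telemetry region may begin mid-page and mmap can only map whole
 * pages, so "offset" tells userspace how far into the first mapped page
 * the data actually starts.
 */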
static ssize_t
offset_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct intel_pmt_entry *entry = dev_get_drvdata(dev);

	return sprintf(buf, "%lu\n", offset_in_page(entry->base_addr));
}
static DEVICE_ATTR_RO(offset);

static struct attribute *intel_pmt_attrs[] = {
	&dev_attr_guid.attr,
	&dev_attr_size.attr,
	&dev_attr_offset.attr,
	NULL
};
ATTRIBUTE_GROUPS(intel_pmt);

static struct class intel_pmt_class = {
	.name = "intel_pmt",
	.owner = THIS_MODULE,
	.dev_groups = intel_pmt_groups,
};

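/*
 * Translate a decoded discovery header into an intel_pmt_entry: resolve
 * the physical base address of the telemetry region from the header's
 * access type and base offset, then record the GUID and region size.
 */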
static int intel_pmt_populate_entry(struct intel_pmt_entry *entry,
				    struct intel_pmt_header *header,
				    struct device *dev,
				    struct resource *disc_res)
{
	struct pci_dev *pci_dev = to_pci_dev(dev->parent);
	u8 bir;

	/*
	 * The base offset should always be 8 byte aligned.
	 *
	 * For non-local access types the lower 3 bits of the base offset
	 * contain the index of the base address register where the
	 * telemetry can be found.
	 */
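	/*
	 * For example, a base offset of 0x40003 would decode to BIR 3
	 * with a 0x40000 offset within that BAR, assuming the usual
	 * GET_BIR()/GET_ADDRESS() masks of bits [2:0] and [31:3] in
	 * class.h.
	 */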
	bir = GET_BIR(header->base_offset);

	/* Local access and BARID only for now */
	switch (header->access_type) {
	case ACCESS_LOCAL:
		if (bir) {
			dev_err(dev,
				"Unsupported BAR index %d for access type %d\n",
				bir, header->access_type);
			return -EINVAL;
		}
		/*
		 * For access_type LOCAL, the base address is as follows:
		 * base address = end of discovery region + base offset
		 */
		entry->base_addr = disc_res->end + 1 + header->base_offset;
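		/*
		 * For example, a discovery region ending at 0xfe000fff
		 * with a base offset of 0x100 yields a base address of
		 * 0xfe001100 (hypothetical addresses, for illustration
		 * only).
		 */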

		/*
		 * Some hardware uses a different calculation for the base
		 * address when access_type == ACCESS_LOCAL. On these
		 * systems ACCESS_LOCAL refers to an address in the same
		 * BAR as the header, but at a fixed offset. Since only the
		 * header address was supplied to the driver, we don't know
		 * which BAR it was in, so search for the BAR whose range
		 * includes the header address.
		 */
		if (intel_pmt_is_early_client_hw(dev)) {
			int i;

			entry->base_addr = 0;
			for (i = 0; i < 6; i++)
				if (disc_res->start >= pci_resource_start(pci_dev, i) &&
				   (disc_res->start <= pci_resource_end(pci_dev, i))) {
					entry->base_addr = pci_resource_start(pci_dev, i) +
							   header->base_offset;
					break;
				}
			if (!entry->base_addr)
				return -EINVAL;
		}

		break;
	case ACCESS_BARID:
		/*
		 * If another BAR was specified then the base offset
		 * represents the offset within that BAR. So retrieve the
		 * address from the parent PCI device and add the offset.
		 */
		entry->base_addr = pci_resource_start(pci_dev, bir) +
				   GET_ADDRESS(header->base_offset);
		break;
	default:
		dev_err(dev, "Unsupported access type %d\n",
			header->access_type);
		return -EINVAL;
	}

	entry->guid = header->guid;
	entry->size = header->size;

	return 0;
}

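/*
 * Register one telemetry endpoint: allocate a device id from the
 * namespace xarray, create the class device, attach any namespace
 * attribute group, and expose the telemetry region as a read-only
 * binary sysfs file served by intel_pmt_read()/intel_pmt_mmap().
 */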
static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
				  struct intel_pmt_namespace *ns,
				  struct device *parent)
{
	struct resource res = {0};
	struct device *dev;
	int ret;

	ret = xa_alloc(ns->xa, &entry->devid, entry, PMT_XA_LIMIT, GFP_KERNEL);
	if (ret)
		return ret;

	dev = device_create(&intel_pmt_class, parent, MKDEV(0, 0), entry,
			    "%s%d", ns->name, entry->devid);

	if (IS_ERR(dev)) {
		dev_err(parent, "Could not create %s%d device node\n",
			ns->name, entry->devid);
		ret = PTR_ERR(dev);
		goto fail_dev_create;
	}

	entry->kobj = &dev->kobj;

	if (ns->attr_grp) {
		ret = sysfs_create_group(entry->kobj, ns->attr_grp);
		if (ret)
			goto fail_sysfs;
	}

	/* if size is 0 assume no data buffer, so no file needed */
	if (!entry->size)
		return 0;

	res.start = entry->base_addr;
	res.end = res.start + entry->size - 1;
	res.flags = IORESOURCE_MEM;

	entry->base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(entry->base)) {
		ret = PTR_ERR(entry->base);
		goto fail_ioremap;
	}

	sysfs_bin_attr_init(&entry->pmt_bin_attr);
	entry->pmt_bin_attr.attr.name = ns->name;
	entry->pmt_bin_attr.attr.mode = 0440;
	entry->pmt_bin_attr.mmap = intel_pmt_mmap;
	entry->pmt_bin_attr.read = intel_pmt_read;
	entry->pmt_bin_attr.size = entry->size;

	ret = sysfs_create_bin_file(&dev->kobj, &entry->pmt_bin_attr);
	if (!ret)
		return 0;

fail_ioremap:
	if (ns->attr_grp)
		sysfs_remove_group(entry->kobj, ns->attr_grp);
fail_sysfs:
	device_unregister(dev);
fail_dev_create:
	xa_erase(ns->xa, entry->devid);

	return ret;
}

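/*
 * Create a telemetry endpoint for one resource of a vsec device: map its
 * discovery table, decode the header via the namespace callback, resolve
 * the telemetry base address, and register the endpoint.
 */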
int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns,
			 struct intel_vsec_device *intel_vsec_dev, int idx)
{
	struct device *dev = &intel_vsec_dev->auxdev.dev;
	struct intel_pmt_header header;
	struct resource *disc_res;
	int ret;

	disc_res = &intel_vsec_dev->resource[idx];

	entry->disc_table = devm_ioremap_resource(dev, disc_res);
	if (IS_ERR(entry->disc_table))
		return PTR_ERR(entry->disc_table);

	ret = ns->pmt_header_decode(entry, &header, dev);
	if (ret)
		return ret;

	ret = intel_pmt_populate_entry(entry, &header, dev, disc_res);
	if (ret)
		return ret;

	return intel_pmt_dev_register(entry, ns, dev);
}
EXPORT_SYMBOL_GPL(intel_pmt_dev_create);

void intel_pmt_dev_destroy(struct intel_pmt_entry *entry,
			   struct intel_pmt_namespace *ns)
{
	struct device *dev = kobj_to_dev(entry->kobj);

	if (entry->size)
		sysfs_remove_bin_file(entry->kobj, &entry->pmt_bin_attr);

	if (ns->attr_grp)
		sysfs_remove_group(entry->kobj, ns->attr_grp);

	device_unregister(dev);
	xa_erase(ns->xa, entry->devid);
}
EXPORT_SYMBOL_GPL(intel_pmt_dev_destroy);

static int __init pmt_class_init(void)
{
	return class_register(&intel_pmt_class);
}

static void __exit pmt_class_exit(void)
{
	class_unregister(&intel_pmt_class);
}

module_init(pmt_class_init);
module_exit(pmt_class_exit);

MODULE_AUTHOR("Alexander Duyck <alexander.h.duyck@linux.intel.com>");
MODULE_DESCRIPTION("Intel PMT Class driver");
MODULE_LICENSE("GPL v2");