// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel & MS High Precision Event Timer Implementation.
 *
 * Copyright (C) 2003 Intel Corporation
 *	Venki Pallipadi
 * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
 *	Bob Picco <robert.picco@hp.com>
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
#include <linux/bcd.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/clocksource.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/acpi.h>
#include <linux/hpet.h>
#include <asm/current.h>
#include <asm/irq.h>
#include <asm/div64.h>

/*
 * The High Precision Event Timer driver.
 * This driver is closely modelled after the rtc.c driver.
 * See HPET spec revision 1.
 */
#define HPET_USER_FREQ		(64)
#define HPET_DRIFT		(500)

#define HPET_RANGE_SIZE		1024	/* from HPET spec */


/* WARNING -- don't get confused.  These macros are never used
 * to write the (single) counter, and rarely to read it.
 * They're badly named; to fix, someday.
 */
#if BITS_PER_LONG == 64
#define write_counter(V, MC)	writeq(V, MC)
#define read_counter(MC)	readq(MC)
#else
#define write_counter(V, MC)	writel(V, MC)
#define read_counter(MC)	readl(MC)
#endif
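/*
 * Note: on 32-bit kernels these expand to 32-bit MMIO accesses.  That should
 * be sufficient for this driver because hpet_ioctl_ieon() forces each
 * comparator into 32-bit mode (Tn_32MODE_CNF_MASK) before arming it, so the
 * comparator values and the main-counter deltas derived from them fit in
 * 32 bits.
 */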

static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */
static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;

/* A lock for concurrent access by app and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_lock);

#define HPET_DEV_NAME	(7)

struct hpet_dev {
        struct hpets *hd_hpets;
        struct hpet __iomem *hd_hpet;
        struct hpet_timer __iomem *hd_timer;
        unsigned long hd_ireqfreq;
        unsigned long hd_irqdata;
        wait_queue_head_t hd_waitqueue;
        struct fasync_struct *hd_async_queue;
        unsigned int hd_flags;
        unsigned int hd_irq;
        unsigned int hd_hdwirq;
        char hd_name[HPET_DEV_NAME];
};

struct hpets {
        struct hpets *hp_next;
        struct hpet __iomem *hp_hpet;
        unsigned long hp_hpet_phys;
        unsigned long long hp_tick_freq;
        unsigned long hp_delta;
        unsigned int hp_ntimer;
        unsigned int hp_which;
        struct hpet_dev hp_dev[] __counted_by(hp_ntimer);
};

static struct hpets *hpets;

#define HPET_OPEN		0x0001
#define HPET_IE			0x0002	/* interrupt enabled */
#define HPET_PERIODIC		0x0004
#define HPET_SHARED_IRQ		0x0008

static irqreturn_t hpet_interrupt(int irq, void *data)
{
        struct hpet_dev *devp;
        unsigned long isr;

        devp = data;
        isr = 1 << (devp - devp->hd_hpets->hp_dev);

        if ((devp->hd_flags & HPET_SHARED_IRQ) &&
            !(isr & readl(&devp->hd_hpet->hpet_isr)))
                return IRQ_NONE;

        spin_lock(&hpet_lock);
        devp->hd_irqdata++;

        /*
         * For non-periodic timers, increment the accumulator.
         * This has the effect of treating non-periodic like periodic.
         */
        if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
                unsigned long t, mc, base, k;
                struct hpet __iomem *hpet = devp->hd_hpet;
                struct hpets *hpetp = devp->hd_hpets;

                t = devp->hd_ireqfreq;
                read_counter(&devp->hd_timer->hpet_compare);
                mc = read_counter(&hpet->hpet_mc);
                /* The time for the next interrupt would logically be t + m,
                 * however, if we are very unlucky and the interrupt is delayed
                 * for longer than t then we will completely miss the next
                 * interrupt if we set t + m and an application will hang.
                 * Therefore we need to make a more complex computation assuming
                 * that there exists a k for which the following is true:
                 * k * t + base < mc + delta
                 * (k + 1) * t + base > mc + delta
                 * where t is the interval in hpet ticks for the given freq,
                 * base is the theoretical start value 0 < base < t,
                 * mc is the main counter value at the time of the interrupt,
                 * delta is the time it takes to write a value to the
                 * comparator.
                 * k may then be computed as (mc - base + delta) / t .
                 */
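                /*
                 * Worked example with made-up numbers: for t = 1000 and
                 * delta = 10, an interrupt handled late at mc = 3800 gives
                 * base = mc % t = 800 and k = (3800 - 800 + 10) / 1000 = 3,
                 * so the comparator is re-armed at t * (k + 1) + base = 4800,
                 * a value guaranteed to still lie in the future.
                 */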
                base = mc % t;
                k = (mc - base + hpetp->hp_delta) / t;
                write_counter(t * (k + 1) + base,
                              &devp->hd_timer->hpet_compare);
        }

        if (devp->hd_flags & HPET_SHARED_IRQ)
                writel(isr, &devp->hd_hpet->hpet_isr);
        spin_unlock(&hpet_lock);

        wake_up_interruptible(&devp->hd_waitqueue);

        kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN);

        return IRQ_HANDLED;
}

static void hpet_timer_set_irq(struct hpet_dev *devp)
{
        const unsigned int nr_irqs = irq_get_nr_irqs();
        unsigned long v;
        int irq, gsi;
        struct hpet_timer __iomem *timer;

        spin_lock_irq(&hpet_lock);
        if (devp->hd_hdwirq) {
                spin_unlock_irq(&hpet_lock);
                return;
        }

        timer = devp->hd_timer;

        /* we prefer level triggered mode */
        v = readl(&timer->hpet_config);
        if (!(v & Tn_INT_TYPE_CNF_MASK)) {
                v |= Tn_INT_TYPE_CNF_MASK;
                writel(v, &timer->hpet_config);
        }
        spin_unlock_irq(&hpet_lock);

        v = (readq(&timer->hpet_config) & Tn_INT_ROUTE_CAP_MASK) >>
            Tn_INT_ROUTE_CAP_SHIFT;

        /*
         * In PIC mode, skip IRQ0-4, IRQ6-9 and IRQ12-15, which are always
         * used by legacy devices. In IO APIC mode, skip all of the legacy IRQs.
         */
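        /*
         * In mask terms: 0xf3df has bits 0-4, 6-9 and 12-15 set, so clearing
         * it leaves only IRQ5, IRQ10 and IRQ11 of the legacy range as
         * candidates; 0xffff clears IRQ0-15 entirely.
         */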
        if (acpi_irq_model == ACPI_IRQ_MODEL_PIC)
                v &= ~0xf3df;
        else
                v &= ~0xffff;

        for_each_set_bit(irq, &v, HPET_MAX_IRQ) {
                if (irq >= nr_irqs) {
                        irq = HPET_MAX_IRQ;
                        break;
                }

                gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,
                                        ACPI_ACTIVE_LOW);
                if (gsi > 0)
                        break;

                /* FIXME: Setup interrupt source table */
        }

        if (irq < HPET_MAX_IRQ) {
                spin_lock_irq(&hpet_lock);
                v = readl(&timer->hpet_config);
                v |= irq << Tn_INT_ROUTE_CNF_SHIFT;
                writel(v, &timer->hpet_config);
                devp->hd_hdwirq = gsi;
                spin_unlock_irq(&hpet_lock);
        }
        return;
}

static int hpet_open(struct inode *inode, struct file *file)
{
        struct hpet_dev *devp;
        struct hpets *hpetp;
        int i;

        if (file->f_mode & FMODE_WRITE)
                return -EINVAL;

        mutex_lock(&hpet_mutex);
        spin_lock_irq(&hpet_lock);

        for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
                for (i = 0; i < hpetp->hp_ntimer; i++)
                        if (hpetp->hp_dev[i].hd_flags & HPET_OPEN) {
                                continue;
                        } else {
                                devp = &hpetp->hp_dev[i];
                                break;
                        }

        if (!devp) {
                spin_unlock_irq(&hpet_lock);
                mutex_unlock(&hpet_mutex);
                return -EBUSY;
        }

        file->private_data = devp;
        devp->hd_irqdata = 0;
        devp->hd_flags |= HPET_OPEN;
        spin_unlock_irq(&hpet_lock);
        mutex_unlock(&hpet_mutex);

        hpet_timer_set_irq(devp);

        return 0;
}

static ssize_t
hpet_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long data;
        ssize_t retval;
        struct hpet_dev *devp;

        devp = file->private_data;
        if (!devp->hd_ireqfreq)
                return -EIO;

        if (in_compat_syscall()) {
                if (count < sizeof(compat_ulong_t))
                        return -EINVAL;
        } else {
                if (count < sizeof(unsigned long))
                        return -EINVAL;
        }

        add_wait_queue(&devp->hd_waitqueue, &wait);

        for ( ; ; ) {
                set_current_state(TASK_INTERRUPTIBLE);

                spin_lock_irq(&hpet_lock);
                data = devp->hd_irqdata;
                devp->hd_irqdata = 0;
                spin_unlock_irq(&hpet_lock);

                if (data) {
                        break;
                } else if (file->f_flags & O_NONBLOCK) {
                        retval = -EAGAIN;
                        goto out;
                } else if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        goto out;
                }
                schedule();
        }

        if (in_compat_syscall()) {
                retval = put_user(data, (compat_ulong_t __user *)buf);
                if (!retval)
                        retval = sizeof(compat_ulong_t);
        } else {
                retval = put_user(data, (unsigned long __user *)buf);
                if (!retval)
                        retval = sizeof(unsigned long);
        }

out:
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&devp->hd_waitqueue, &wait);

        return retval;
}

static __poll_t hpet_poll(struct file *file, poll_table *wait)
{
        unsigned long v;
        struct hpet_dev *devp;

        devp = file->private_data;

        if (!devp->hd_ireqfreq)
                return 0;

        poll_wait(file, &devp->hd_waitqueue, wait);

        spin_lock_irq(&hpet_lock);
        v = devp->hd_irqdata;
        spin_unlock_irq(&hpet_lock);

        if (v != 0)
                return EPOLLIN | EPOLLRDNORM;

        return 0;
}

#ifdef CONFIG_HPET_MMAP
#ifdef CONFIG_HPET_MMAP_DEFAULT
static int hpet_mmap_enabled = 1;
#else
static int hpet_mmap_enabled = 0;
#endif

static __init int hpet_mmap_enable(char *str)
{
        get_option(&str, &hpet_mmap_enabled);
        pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
        return 1;
}
__setup("hpet_mmap=", hpet_mmap_enable);

static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct hpet_dev *devp;
        unsigned long addr;

        if (!hpet_mmap_enabled)
                return -EACCES;

        devp = file->private_data;
        addr = devp->hd_hpets->hp_hpet_phys;

        if (addr & (PAGE_SIZE - 1))
                return -ENOSYS;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return vm_iomap_memory(vma, addr, PAGE_SIZE);
}
#else
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
        return -ENOSYS;
}
#endif

static int hpet_fasync(int fd, struct file *file, int on)
{
        struct hpet_dev *devp;

        devp = file->private_data;

        if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0)
                return 0;
        else
                return -EIO;
}

static int hpet_release(struct inode *inode, struct file *file)
{
        struct hpet_dev *devp;
        struct hpet_timer __iomem *timer;
        int irq = 0;

        devp = file->private_data;
        timer = devp->hd_timer;

        spin_lock_irq(&hpet_lock);

        writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
               &timer->hpet_config);

        irq = devp->hd_irq;
        devp->hd_irq = 0;

        devp->hd_ireqfreq = 0;

        if (devp->hd_flags & HPET_PERIODIC
            && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
                unsigned long v;

                v = readq(&timer->hpet_config);
                v ^= Tn_TYPE_CNF_MASK;
                writeq(v, &timer->hpet_config);
        }

        devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC);
        spin_unlock_irq(&hpet_lock);

        if (irq)
                free_irq(irq, devp);

        file->private_data = NULL;
        return 0;
}

static int hpet_ioctl_ieon(struct hpet_dev *devp)
{
        struct hpet_timer __iomem *timer;
        struct hpet __iomem *hpet;
        struct hpets *hpetp;
        int irq;
        unsigned long g, v, t, m;
        unsigned long flags, isr;

        timer = devp->hd_timer;
        hpet = devp->hd_hpet;
        hpetp = devp->hd_hpets;

        if (!devp->hd_ireqfreq)
                return -EIO;

        spin_lock_irq(&hpet_lock);

        if (devp->hd_flags & HPET_IE) {
                spin_unlock_irq(&hpet_lock);
                return -EBUSY;
        }

        devp->hd_flags |= HPET_IE;

        if (readl(&timer->hpet_config) & Tn_INT_TYPE_CNF_MASK)
                devp->hd_flags |= HPET_SHARED_IRQ;
        spin_unlock_irq(&hpet_lock);

        irq = devp->hd_hdwirq;

        if (irq) {
                unsigned long irq_flags;

                if (devp->hd_flags & HPET_SHARED_IRQ) {
                        /*
                         * To prevent the interrupt handler from seeing an
                         * unwanted interrupt status bit, program the timer
                         * so that it will not fire in the near future ...
                         */
                        writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
                               &timer->hpet_config);
                        write_counter(read_counter(&hpet->hpet_mc),
                                      &timer->hpet_compare);
                        /* ... and clear any left-over status. */
                        isr = 1 << (devp - devp->hd_hpets->hp_dev);
                        writel(isr, &hpet->hpet_isr);
                }

                sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
                irq_flags = devp->hd_flags & HPET_SHARED_IRQ ? IRQF_SHARED : 0;
                if (request_irq(irq, hpet_interrupt, irq_flags,
                                devp->hd_name, (void *)devp)) {
                        printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
                        irq = 0;
                }
        }

        if (irq == 0) {
                spin_lock_irq(&hpet_lock);
                devp->hd_flags ^= HPET_IE;
                spin_unlock_irq(&hpet_lock);
                return -EIO;
        }

        devp->hd_irq = irq;
        t = devp->hd_ireqfreq;
        v = readq(&timer->hpet_config);

        /* 64-bit comparators are not yet supported through the ioctls,
         * so force this into 32-bit mode if it supports both modes
         */
        g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK;

        if (devp->hd_flags & HPET_PERIODIC) {
                g |= Tn_TYPE_CNF_MASK;
                v |= Tn_TYPE_CNF_MASK | Tn_VAL_SET_CNF_MASK;
                writeq(v, &timer->hpet_config);
                local_irq_save(flags);

                /*
                 * NOTE: First we modify the hidden accumulator
                 * register supported by periodic-capable comparators.
                 * We never want to modify the (single) counter; that
                 * would affect all the comparators. The value written
                 * is the counter value when the first interrupt is due.
                 */
                m = read_counter(&hpet->hpet_mc);
                write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
                /*
                 * Then we modify the comparator, indicating the period
                 * for subsequent interrupts.
                 */
                write_counter(t, &timer->hpet_compare);
        } else {
                local_irq_save(flags);
                m = read_counter(&hpet->hpet_mc);
                write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
        }

        if (devp->hd_flags & HPET_SHARED_IRQ) {
                isr = 1 << (devp - devp->hd_hpets->hp_dev);
                writel(isr, &hpet->hpet_isr);
        }
        writeq(g, &timer->hpet_config);
        local_irq_restore(flags);

        return 0;
}

/* converts Hz to number of timer ticks */
static inline unsigned long hpet_time_div(struct hpets *hpets,
                                          unsigned long dis)
{
        unsigned long long m;

        m = hpets->hp_tick_freq + (dis >> 1);
        return div64_ul(m, dis);
}
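/*
 * Example: with the typical 14.318180 MHz HPET and dis = 64 Hz, this returns
 * (14318180 + 32) / 64 = 223722 ticks per interrupt period, i.e. the quotient
 * rounded to the nearest integer rather than truncated.
 */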

static int
hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
                  struct hpet_info *info)
{
        struct hpet_timer __iomem *timer;
        struct hpets *hpetp;
        int err;
        unsigned long v;

        switch (cmd) {
        case HPET_IE_OFF:
        case HPET_INFO:
        case HPET_EPI:
        case HPET_DPI:
        case HPET_IRQFREQ:
                timer = devp->hd_timer;
                hpetp = devp->hd_hpets;
                break;
        case HPET_IE_ON:
                return hpet_ioctl_ieon(devp);
        default:
                return -EINVAL;
        }

        err = 0;

        switch (cmd) {
        case HPET_IE_OFF:
                if ((devp->hd_flags & HPET_IE) == 0)
                        break;
                v = readq(&timer->hpet_config);
                v &= ~Tn_INT_ENB_CNF_MASK;
                writeq(v, &timer->hpet_config);
                if (devp->hd_irq) {
                        free_irq(devp->hd_irq, devp);
                        devp->hd_irq = 0;
                }
                devp->hd_flags ^= HPET_IE;
                break;
        case HPET_INFO:
                {
                        memset(info, 0, sizeof(*info));
                        if (devp->hd_ireqfreq)
                                info->hi_ireqfreq =
                                        hpet_time_div(hpetp, devp->hd_ireqfreq);
                        info->hi_flags =
                                readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
                        info->hi_hpet = hpetp->hp_which;
                        info->hi_timer = devp - hpetp->hp_dev;
                        break;
                }
        case HPET_EPI:
                v = readq(&timer->hpet_config);
                if ((v & Tn_PER_INT_CAP_MASK) == 0) {
                        err = -ENXIO;
                        break;
                }
                devp->hd_flags |= HPET_PERIODIC;
                break;
        case HPET_DPI:
                v = readq(&timer->hpet_config);
                if ((v & Tn_PER_INT_CAP_MASK) == 0) {
                        err = -ENXIO;
                        break;
                }
                if (devp->hd_flags & HPET_PERIODIC &&
                    readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
                        v = readq(&timer->hpet_config);
                        v ^= Tn_TYPE_CNF_MASK;
                        writeq(v, &timer->hpet_config);
                }
                devp->hd_flags &= ~HPET_PERIODIC;
                break;
        case HPET_IRQFREQ:
                if ((arg > hpet_max_freq) &&
                    !capable(CAP_SYS_RESOURCE)) {
                        err = -EACCES;
                        break;
                }

                if (!arg) {
                        err = -EINVAL;
                        break;
                }

                devp->hd_ireqfreq = hpet_time_div(hpetp, arg);
        }

        return err;
}

static long
hpet_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct hpet_info info;
        int err;

        mutex_lock(&hpet_mutex);
        err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
        mutex_unlock(&hpet_mutex);

        if ((cmd == HPET_INFO) && !err &&
            (copy_to_user((void __user *)arg, &info, sizeof(info))))
                err = -EFAULT;

        return err;
}

#ifdef CONFIG_COMPAT
struct compat_hpet_info {
        compat_ulong_t hi_ireqfreq;	/* Hz */
        compat_ulong_t hi_flags;	/* information */
        unsigned short hi_hpet;
        unsigned short hi_timer;
};

/* 32-bit types would lead to different command codes which should be
 * translated into 64-bit ones before being passed to hpet_ioctl_common()
 */
#define COMPAT_HPET_INFO	_IOR('h', 0x03, struct compat_hpet_info)
#define COMPAT_HPET_IRQFREQ	_IOW('h', 0x6, compat_ulong_t)

static long
hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct hpet_info info;
        int err;

        if (cmd == COMPAT_HPET_INFO)
                cmd = HPET_INFO;

        if (cmd == COMPAT_HPET_IRQFREQ)
                cmd = HPET_IRQFREQ;

        mutex_lock(&hpet_mutex);
        err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
        mutex_unlock(&hpet_mutex);

        if ((cmd == HPET_INFO) && !err) {
                struct compat_hpet_info __user *u = compat_ptr(arg);
                if (put_user(info.hi_ireqfreq, &u->hi_ireqfreq) ||
                    put_user(info.hi_flags, &u->hi_flags) ||
                    put_user(info.hi_hpet, &u->hi_hpet) ||
                    put_user(info.hi_timer, &u->hi_timer))
                        err = -EFAULT;
        }

        return err;
}
#endif

static const struct file_operations hpet_fops = {
        .owner = THIS_MODULE,
        .read = hpet_read,
        .poll = hpet_poll,
        .unlocked_ioctl = hpet_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = hpet_compat_ioctl,
#endif
        .open = hpet_open,
        .release = hpet_release,
        .fasync = hpet_fasync,
        .mmap = hpet_mmap,
};
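/*
 * For reference, a minimal sketch of how user space typically drives this
 * character device (error handling omitted, 64 Hz chosen arbitrarily):
 *
 *	int fd = open("/dev/hpet", O_RDONLY);
 *	unsigned long hz = 64, ticks;
 *
 *	ioctl(fd, HPET_IRQFREQ, hz);		// set interrupt frequency
 *	ioctl(fd, HPET_IE_ON, 0);		// arm the timer interrupt
 *	read(fd, &ticks, sizeof(ticks));	// blocks; returns the number of
 *						// interrupts since the last read
 *	ioctl(fd, HPET_IE_OFF, 0);
 *	close(fd);
 */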

static int hpet_is_known(struct hpet_data *hdp)
{
        struct hpets *hpetp;

        for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
                if (hpetp->hp_hpet_phys == hdp->hd_phys_address)
                        return 1;

        return 0;
}

static const struct ctl_table hpet_table[] = {
        {
         .procname = "max-user-freq",
         .data = &hpet_max_freq,
         .maxlen = sizeof(int),
         .mode = 0644,
         .proc_handler = proc_dointvec,
         },
};

static struct ctl_table_header *sysctl_header;

/*
 * Adjustment for when arming the timer with
 * initial conditions.  That is, main counter
 * ticks expired before interrupts are enabled.
 */
#define TICK_CALIBRATE	(1000UL)

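/*
 * Measure, over a window of hp_tick_freq / TICK_CALIBRATE main-counter ticks
 * (roughly one millisecond), the average number of ticks that elapse per loop
 * iteration, i.e. per counter read plus comparator write.  The result becomes
 * hp_delta, the slack added whenever a comparator is armed so that the
 * programmed value is not already in the past by the time the write completes.
 */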
static unsigned long __hpet_calibrate(struct hpets *hpetp)
{
        struct hpet_timer __iomem *timer = NULL;
        unsigned long t, m, count, i, flags, start;
        struct hpet_dev *devp;
        int j;
        struct hpet __iomem *hpet;

        for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++)
                if ((devp->hd_flags & HPET_OPEN) == 0) {
                        timer = devp->hd_timer;
                        break;
                }

        if (!timer)
                return 0;

        hpet = hpetp->hp_hpet;
        t = read_counter(&timer->hpet_compare);

        i = 0;
        count = hpet_time_div(hpetp, TICK_CALIBRATE);

        local_irq_save(flags);

        start = read_counter(&hpet->hpet_mc);

        do {
                m = read_counter(&hpet->hpet_mc);
                write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
        } while (i++, (m - start) < count);

        local_irq_restore(flags);

        return (m - start) / i;
}

static unsigned long hpet_calibrate(struct hpets *hpetp)
{
        unsigned long ret = ~0UL;
        unsigned long tmp;

        /*
         * Try to calibrate until the return value settles to a stable,
         * small value.  An SMI that interrupts a calibration pass inflates
         * that pass's result, so repeating until the value stops shrinking
         * avoids its impact.
         */
        for ( ; ; ) {
                tmp = __hpet_calibrate(hpetp);
                if (ret <= tmp)
                        break;
                ret = tmp;
        }

        return ret;
}

int hpet_alloc(struct hpet_data *hdp)
{
        u64 cap, mcfg;
        struct hpet_dev *devp;
        u32 i, ntimer;
        struct hpets *hpetp;
        struct hpet __iomem *hpet;
        static struct hpets *last;
        u32 period;
        unsigned long long temp;
        u32 remainder;

        /*
         * hpet_alloc can be called by platform dependent code.
         * If platform dependent code has allocated the hpet that
         * ACPI has also reported, then we catch it here.
         */
        if (hpet_is_known(hdp)) {
                printk(KERN_DEBUG "%s: duplicate HPET ignored\n",
                       __func__);
                return 0;
        }

        hpetp = kzalloc(struct_size(hpetp, hp_dev, hdp->hd_nirqs),
                        GFP_KERNEL);

        if (!hpetp)
                return -ENOMEM;

        hpetp->hp_which = hpet_nhpet++;
        hpetp->hp_hpet = hdp->hd_address;
        hpetp->hp_hpet_phys = hdp->hd_phys_address;

        hpetp->hp_ntimer = hdp->hd_nirqs;

        for (i = 0; i < hdp->hd_nirqs; i++)
                hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];

        hpet = hpetp->hp_hpet;

        cap = readq(&hpet->hpet_cap);

        ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1;

        if (hpetp->hp_ntimer != ntimer) {
                printk(KERN_WARNING "hpet: number irqs doesn't agree"
                       " with number of timers\n");
                kfree(hpetp);
                return -ENODEV;
        }

        if (last)
                last->hp_next = hpetp;
        else
                hpets = hpetp;

        last = hpetp;

        period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >>
                 HPET_COUNTER_CLK_PERIOD_SHIFT; /* fs, 10^-15 */
        temp = 1000000000000000uLL; /* 10^15 femtoseconds per second */
        temp += period >> 1; /* round */
        do_div(temp, period);
        hpetp->hp_tick_freq = temp; /* ticks per second */
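        /*
         * For example, a commonly reported clock period of 69841279 fs works
         * out to 10^15 / 69841279 ~= 14318180 ticks per second, i.e. the
         * familiar 14.318180 MHz HPET frequency.
         */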

        printk(KERN_INFO "hpet%u: at MMIO 0x%lx, IRQ%s",
               hpetp->hp_which, hdp->hd_phys_address,
               hpetp->hp_ntimer > 1 ? "s" : "");
        for (i = 0; i < hpetp->hp_ntimer; i++)
                printk(KERN_CONT "%s %u", i > 0 ? "," : "", hdp->hd_irq[i]);
        printk(KERN_CONT "\n");

        temp = hpetp->hp_tick_freq;
        remainder = do_div(temp, 1000000);
        printk(KERN_INFO
               "hpet%u: %u comparators, %d-bit %u.%06u MHz counter\n",
               hpetp->hp_which, hpetp->hp_ntimer,
               cap & HPET_COUNTER_SIZE_MASK ? 64 : 32,
               (unsigned) temp, remainder);

        mcfg = readq(&hpet->hpet_config);
        if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) {
                write_counter(0L, &hpet->hpet_mc);
                mcfg |= HPET_ENABLE_CNF_MASK;
                writeq(mcfg, &hpet->hpet_config);
        }

        for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; i++, devp++) {
                struct hpet_timer __iomem *timer;

                timer = &hpet->hpet_timers[devp - hpetp->hp_dev];

                devp->hd_hpets = hpetp;
                devp->hd_hpet = hpet;
                devp->hd_timer = timer;

                /*
                 * If the timer was reserved by platform code,
                 * then make timer unavailable for opens.
                 */
                if (hdp->hd_state & (1 << i)) {
                        devp->hd_flags = HPET_OPEN;
                        continue;
                }

                init_waitqueue_head(&devp->hd_waitqueue);
        }

        hpetp->hp_delta = hpet_calibrate(hpetp);

        return 0;
}

static acpi_status hpet_resources(struct acpi_resource *res, void *data)
{
        struct hpet_data *hdp;
        acpi_status status;
        struct acpi_resource_address64 addr;

        hdp = data;

        status = acpi_resource_to_address64(res, &addr);

        if (ACPI_SUCCESS(status)) {
                hdp->hd_phys_address = addr.address.minimum;
                hdp->hd_address = ioremap(addr.address.minimum, addr.address.address_length);
                if (!hdp->hd_address)
                        return AE_ERROR;

                if (hpet_is_known(hdp)) {
                        iounmap(hdp->hd_address);
                        return AE_ALREADY_EXISTS;
                }
        } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
                struct acpi_resource_fixed_memory32 *fixmem32;

                fixmem32 = &res->data.fixed_memory32;

                hdp->hd_phys_address = fixmem32->address;
                hdp->hd_address = ioremap(fixmem32->address,
                                          HPET_RANGE_SIZE);
                if (!hdp->hd_address)
                        return AE_ERROR;

                if (hpet_is_known(hdp)) {
                        iounmap(hdp->hd_address);
                        return AE_ALREADY_EXISTS;
                }
        } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
                struct acpi_resource_extended_irq *irqp;
                int i, irq;

                irqp = &res->data.extended_irq;

                for (i = 0; i < irqp->interrupt_count; i++) {
                        if (hdp->hd_nirqs >= HPET_MAX_TIMERS)
                                break;

                        irq = acpi_register_gsi(NULL, irqp->interrupts[i],
                                                irqp->triggering,
                                                irqp->polarity);
                        if (irq < 0)
                                return AE_ERROR;

                        hdp->hd_irq[hdp->hd_nirqs] = irq;
                        hdp->hd_nirqs++;
                }
        }

        return AE_OK;
}

static int hpet_acpi_add(struct acpi_device *device)
{
        acpi_status result;
        struct hpet_data data;

        memset(&data, 0, sizeof(data));

        result =
            acpi_walk_resources(device->handle, METHOD_NAME__CRS,
                                hpet_resources, &data);

        if (ACPI_FAILURE(result))
                return -ENODEV;

        if (!data.hd_address || !data.hd_nirqs) {
                if (data.hd_address)
                        iounmap(data.hd_address);
                printk("%s: no address or irqs in _CRS\n", __func__);
                return -ENODEV;
        }

        return hpet_alloc(&data);
}

static const struct acpi_device_id hpet_device_ids[] = {
        {"PNP0103", 0},
        {"", 0},
};

static struct acpi_driver hpet_acpi_driver = {
        .name = "hpet",
        .ids = hpet_device_ids,
        .ops = {
                .add = hpet_acpi_add,
                },
};

static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };

static int __init hpet_init(void)
{
        int result;

        result = misc_register(&hpet_misc);
        if (result < 0)
                return -ENODEV;

        sysctl_header = register_sysctl("dev/hpet", hpet_table);

        result = acpi_bus_register_driver(&hpet_acpi_driver);
        if (result < 0) {
                unregister_sysctl_table(sysctl_header);
                misc_deregister(&hpet_misc);
                return result;
        }

        return 0;
}
device_initcall(hpet_init);

/*
MODULE_AUTHOR("Bob Picco <Robert.Picco@hp.com>");
MODULE_LICENSE("GPL");
*/