// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PTP 1588 clock support - character device implementation.
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 */
#include <linux/compat.h>
#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeping.h>
#include <linux/debugfs.h>

#include <linux/nospec.h>

#include "ptp_private.h"

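/*
 * Ask the driver to disable whatever request (external timestamp or
 * periodic output) is currently associated with the given function and
 * channel.
 */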
static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
                               enum ptp_pin_function func, unsigned int chan)
{
        struct ptp_clock_request rq;
        int err = 0;

        memset(&rq, 0, sizeof(rq));

        switch (func) {
        case PTP_PF_NONE:
                break;
        case PTP_PF_EXTTS:
                rq.type = PTP_CLK_REQ_EXTTS;
                rq.extts.index = chan;
                err = ops->enable(ops, &rq, 0);
                break;
        case PTP_PF_PEROUT:
                rq.type = PTP_CLK_REQ_PEROUT;
                rq.perout.index = chan;
                err = ops->enable(ops, &rq, 0);
                break;
        case PTP_PF_PHYSYNC:
                break;
        default:
                return -EINVAL;
        }

        return err;
}

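/*
 * Reprogram a pin: validate the requested function/channel pair, let the
 * driver veto the combination via its verify() method, then disable any
 * previous assignment before recording the new one in pin_config.
 */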
int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
                    enum ptp_pin_function func, unsigned int chan)
{
        struct ptp_clock_info *info = ptp->info;
        struct ptp_pin_desc *pin1 = NULL, *pin2 = &info->pin_config[pin];
        unsigned int i;

        /* Check to see if any other pin previously had this function. */
        for (i = 0; i < info->n_pins; i++) {
                if (info->pin_config[i].func == func &&
                    info->pin_config[i].chan == chan) {
                        pin1 = &info->pin_config[i];
                        break;
                }
        }
        if (pin1 && i == pin)
                return 0;

        /* Check the desired function and channel. */
        switch (func) {
        case PTP_PF_NONE:
                break;
        case PTP_PF_EXTTS:
                if (chan >= info->n_ext_ts)
                        return -EINVAL;
                break;
        case PTP_PF_PEROUT:
                if (chan >= info->n_per_out)
                        return -EINVAL;
                break;
        case PTP_PF_PHYSYNC:
                if (chan != 0)
                        return -EINVAL;
                break;
        default:
                return -EINVAL;
        }

        if (info->verify(info, pin, func, chan)) {
                pr_err("driver cannot use function %u and channel %u on pin %u\n",
                       func, chan, pin);
                return -EOPNOTSUPP;
        }

        /* Disable whatever function was previously assigned. */
        if (pin1) {
                ptp_disable_pinfunc(info, func, chan);
                pin1->func = PTP_PF_NONE;
                pin1->chan = 0;
        }
        ptp_disable_pinfunc(info, pin2->func, pin2->chan);
        pin2->func = func;
        pin2->chan = chan;

        return 0;
}

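/*
 * Open: allocate a per-descriptor timestamp event queue with every channel
 * enabled in its mask, link it into the clock's queue list and expose the
 * mask through debugfs.
 */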
int ptp_open(struct posix_clock_context *pccontext, fmode_t fmode)
{
        struct ptp_clock *ptp = container_of(pccontext->clk, struct ptp_clock, clock);
        struct timestamp_event_queue *queue;
        char debugfsname[32];

        queue = kzalloc(sizeof(*queue), GFP_KERNEL);
        if (!queue)
                return -EINVAL;
        queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
        if (!queue->mask) {
                kfree(queue);
                return -EINVAL;
        }
        bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
        spin_lock_init(&queue->lock);
        scoped_guard(spinlock_irq, &ptp->tsevqs_lock)
                list_add_tail(&queue->qlist, &ptp->tsevqs);
        pccontext->private_clkdata = queue;

        /* Debugfs contents */
        sprintf(debugfsname, "0x%p", queue);
        queue->debugfs_instance =
                debugfs_create_dir(debugfsname, ptp->debugfs_root);
        queue->dfs_bitmap.array = (u32 *)queue->mask;
        queue->dfs_bitmap.n_elements =
                DIV_ROUND_UP(PTP_MAX_CHANNELS, BITS_PER_BYTE * sizeof(u32));
        debugfs_create_u32_array("mask", 0444, queue->debugfs_instance,
                                 &queue->dfs_bitmap);

        return 0;
}

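/*
 * Release: undo ptp_open() for this descriptor by removing the debugfs
 * entry, unlinking the queue from the clock's list and freeing it.
 */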
int ptp_release(struct posix_clock_context *pccontext)
{
        struct timestamp_event_queue *queue = pccontext->private_clkdata;
        struct ptp_clock *ptp =
                container_of(pccontext->clk, struct ptp_clock, clock);

        debugfs_remove(queue->debugfs_instance);
        pccontext->private_clkdata = NULL;
        scoped_guard(spinlock_irq, &ptp->tsevqs_lock)
                list_del(&queue->qlist);
        bitmap_free(queue->mask);
        kfree(queue);
        return 0;
}

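/* PTP_CLOCK_GETCAPS(2): report the clock's capabilities to user space. */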
static long ptp_clock_getcaps(struct ptp_clock *ptp, void __user *arg)
{
        struct ptp_clock_caps caps = {
                .max_adj = ptp->info->max_adj,
                .n_alarm = ptp->info->n_alarm,
                .n_ext_ts = ptp->info->n_ext_ts,
                .n_per_out = ptp->info->n_per_out,
                .pps = ptp->info->pps,
                .n_pins = ptp->info->n_pins,
                .cross_timestamping = ptp->info->getcrosststamp != NULL,
                .adjust_phase = ptp->info->adjphase != NULL &&
                                ptp->info->getmaxphase != NULL,
        };

        if (caps.adjust_phase)
                caps.max_phase_adj = ptp->info->getmaxphase(ptp->info);

        return copy_to_user(arg, &caps, sizeof(caps)) ? -EFAULT : 0;
}

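/*
 * PTP_EXTTS_REQUEST(2): validate the request flags against what the driver
 * supports, then ask it to enable or disable external timestamping on the
 * requested channel.
 */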
static long ptp_extts_request(struct ptp_clock *ptp, unsigned int cmd, void __user *arg)
{
        struct ptp_clock_request req = { .type = PTP_CLK_REQ_EXTTS };
        struct ptp_clock_info *ops = ptp->info;
        unsigned int supported_extts_flags;

        if (copy_from_user(&req.extts, arg, sizeof(req.extts)))
                return -EFAULT;

        if (cmd == PTP_EXTTS_REQUEST2) {
                /* Tell the drivers to check the flags carefully. */
                req.extts.flags |= PTP_STRICT_FLAGS;
                /* Make sure no reserved bit is set. */
                if ((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
                    req.extts.rsv[0] || req.extts.rsv[1])
                        return -EINVAL;

                /* Ensure one of the rising/falling edge bits is set. */
                if ((req.extts.flags & PTP_ENABLE_FEATURE) &&
                    (req.extts.flags & PTP_EXTTS_EDGES) == 0)
                        return -EINVAL;
        } else {
                req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
                memset(req.extts.rsv, 0, sizeof(req.extts.rsv));
        }

        if (req.extts.index >= ops->n_ext_ts)
                return -EINVAL;

        supported_extts_flags = ptp->info->supported_extts_flags;
        /* The PTP_ENABLE_FEATURE flag is always supported. */
        supported_extts_flags |= PTP_ENABLE_FEATURE;
        /* If the driver does not support strictly checking flags, the
         * PTP_RISING_EDGE and PTP_FALLING_EDGE flags are merely hints
         * which are not enforced.
         */
        if (!(supported_extts_flags & PTP_STRICT_FLAGS))
                supported_extts_flags |= PTP_EXTTS_EDGES;
        /* Reject unsupported flags */
        if (req.extts.flags & ~supported_extts_flags)
                return -EOPNOTSUPP;

        scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &ptp->pincfg_mux)
                return ops->enable(ops, &req, req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0);
}

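/*
 * PTP_PEROUT_REQUEST(2): validate the flags, duty cycle and phase, then ask
 * the driver to (re)program the periodic output; a zero period disables it.
 */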
static long ptp_perout_request(struct ptp_clock *ptp, unsigned int cmd, void __user *arg)
{
        struct ptp_clock_request req = { .type = PTP_CLK_REQ_PEROUT };
        struct ptp_perout_request *perout = &req.perout;
        struct ptp_clock_info *ops = ptp->info;

        if (copy_from_user(perout, arg, sizeof(*perout)))
                return -EFAULT;

        if (cmd == PTP_PEROUT_REQUEST2) {
                if (perout->flags & ~PTP_PEROUT_VALID_FLAGS)
                        return -EINVAL;

                /*
                 * The "on" field has undefined meaning if
                 * PTP_PEROUT_DUTY_CYCLE isn't set, so we must still treat it
                 * as reserved and require it to be zero.
                 */
                if (!(perout->flags & PTP_PEROUT_DUTY_CYCLE) &&
                    !mem_is_zero(perout->rsv, sizeof(perout->rsv)))
                        return -EINVAL;

                if (perout->flags & PTP_PEROUT_DUTY_CYCLE) {
                        /* The on time must not exceed the period. */
                        if (perout->on.sec > perout->period.sec ||
                            (perout->on.sec == perout->period.sec &&
                             perout->on.nsec > perout->period.nsec))
                                return -ERANGE;
                }

                if (perout->flags & PTP_PEROUT_PHASE) {
                        /*
                         * The phase should be specified modulo the period,
                         * therefore anything equal to or larger than one
                         * period is invalid.
                         */
                        if (perout->phase.sec > perout->period.sec ||
                            (perout->phase.sec == perout->period.sec &&
                             perout->phase.nsec >= perout->period.nsec))
                                return -ERANGE;
                }
        } else {
                perout->flags &= PTP_PEROUT_V1_VALID_FLAGS;
                memset(perout->rsv, 0, sizeof(perout->rsv));
        }

        if (perout->index >= ops->n_per_out)
                return -EINVAL;
        if (perout->flags & ~ops->supported_perout_flags)
                return -EOPNOTSUPP;

        scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &ptp->pincfg_mux)
                return ops->enable(ops, &req, perout->period.sec || perout->period.nsec);
}

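/*
 * PTP_ENABLE_PPS(2): ask the driver to start or stop delivering PPS events
 * for this clock.  CAP_SYS_TIME is required because those events can be
 * used to discipline the system clock.
 */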
static long ptp_enable_pps(struct ptp_clock *ptp, bool enable)
{
        struct ptp_clock_request req = { .type = PTP_CLK_REQ_PPS };
        struct ptp_clock_info *ops = ptp->info;

        if (!capable(CAP_SYS_TIME))
                return -EPERM;

        scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &ptp->pincfg_mux)
                return ops->enable(ops, &req, enable);
}

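/*
 * PTP_SYS_OFFSET_PRECISE(2): return a driver-provided cross timestamp of
 * device time, system realtime and system monotonic raw time.
 */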
static long ptp_sys_offset_precise(struct ptp_clock *ptp, void __user *arg)
{
        struct ptp_sys_offset_precise precise_offset;
        struct system_device_crosststamp xtstamp;
        struct timespec64 ts;
        int err;

        if (!ptp->info->getcrosststamp)
                return -EOPNOTSUPP;

        err = ptp->info->getcrosststamp(ptp->info, &xtstamp);
        if (err)
                return err;

        memset(&precise_offset, 0, sizeof(precise_offset));
        ts = ktime_to_timespec64(xtstamp.device);
        precise_offset.device.sec = ts.tv_sec;
        precise_offset.device.nsec = ts.tv_nsec;
        ts = ktime_to_timespec64(xtstamp.sys_realtime);
        precise_offset.sys_realtime.sec = ts.tv_sec;
        precise_offset.sys_realtime.nsec = ts.tv_nsec;
        ts = ktime_to_timespec64(xtstamp.sys_monoraw);
        precise_offset.sys_monoraw.sec = ts.tv_sec;
        precise_offset.sys_monoraw.nsec = ts.tv_nsec;

        return copy_to_user(arg, &precise_offset, sizeof(precise_offset)) ? -EFAULT : 0;
}

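/*
 * PTP_SYS_OFFSET_EXTENDED(2): collect n_samples triplets of (system pre,
 * device, system post) timestamps via the driver's gettimex64() method,
 * taking the system timestamps from the requested clockid.
 */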
static long ptp_sys_offset_extended(struct ptp_clock *ptp, void __user *arg)
{
        struct ptp_sys_offset_extended *extoff __free(kfree) = NULL;
        struct ptp_system_timestamp sts;

        if (!ptp->info->gettimex64)
                return -EOPNOTSUPP;

        extoff = memdup_user(arg, sizeof(*extoff));
        if (IS_ERR(extoff))
                return PTR_ERR(extoff);

        if (extoff->n_samples > PTP_MAX_SAMPLES || extoff->rsv[0] || extoff->rsv[1])
                return -EINVAL;

        switch (extoff->clockid) {
        case CLOCK_REALTIME:
        case CLOCK_MONOTONIC:
        case CLOCK_MONOTONIC_RAW:
                break;
        case CLOCK_AUX ... CLOCK_AUX_LAST:
                if (IS_ENABLED(CONFIG_POSIX_AUX_CLOCKS))
                        break;
                fallthrough;
        default:
                return -EINVAL;
        }

        sts.clockid = extoff->clockid;
        for (unsigned int i = 0; i < extoff->n_samples; i++) {
                struct timespec64 ts;
                int err;

                err = ptp->info->gettimex64(ptp->info, &ts, &sts);
                if (err)
                        return err;

                /* Filter out disabled or unavailable clocks */
                if (sts.pre_ts.tv_sec < 0 || sts.post_ts.tv_sec < 0)
                        return -EINVAL;

                extoff->ts[i][0].sec = sts.pre_ts.tv_sec;
                extoff->ts[i][0].nsec = sts.pre_ts.tv_nsec;
                extoff->ts[i][1].sec = ts.tv_sec;
                extoff->ts[i][1].nsec = ts.tv_nsec;
                extoff->ts[i][2].sec = sts.post_ts.tv_sec;
                extoff->ts[i][2].nsec = sts.post_ts.tv_nsec;
        }

        return copy_to_user(arg, extoff, sizeof(*extoff)) ? -EFAULT : 0;
}

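/*
 * PTP_SYS_OFFSET(2): interleave system realtime readings with device time
 * readings so user space can estimate the offset between the two clocks.
 */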
static long ptp_sys_offset(struct ptp_clock *ptp, void __user *arg)
{
        struct ptp_sys_offset *sysoff __free(kfree) = NULL;
        struct ptp_clock_time *pct;
        struct timespec64 ts;

        sysoff = memdup_user(arg, sizeof(*sysoff));
        if (IS_ERR(sysoff))
                return PTR_ERR(sysoff);

        if (sysoff->n_samples > PTP_MAX_SAMPLES)
                return -EINVAL;

        pct = &sysoff->ts[0];
        for (unsigned int i = 0; i < sysoff->n_samples; i++) {
                struct ptp_clock_info *ops = ptp->info;
                int err;

                ktime_get_real_ts64(&ts);
                pct->sec = ts.tv_sec;
                pct->nsec = ts.tv_nsec;
                pct++;
                if (ops->gettimex64)
                        err = ops->gettimex64(ops, &ts, NULL);
                else
                        err = ops->gettime64(ops, &ts);
                if (err)
                        return err;
                pct->sec = ts.tv_sec;
                pct->nsec = ts.tv_nsec;
                pct++;
        }
        ktime_get_real_ts64(&ts);
        pct->sec = ts.tv_sec;
        pct->nsec = ts.tv_nsec;

        return copy_to_user(arg, sysoff, sizeof(*sysoff)) ? -EFAULT : 0;
}

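/* PTP_PIN_GETFUNC(2): return the current programming of one pin. */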
static long ptp_pin_getfunc(struct ptp_clock *ptp, unsigned int cmd, void __user *arg)
{
        struct ptp_clock_info *ops = ptp->info;
        struct ptp_pin_desc pd;

        if (copy_from_user(&pd, arg, sizeof(pd)))
                return -EFAULT;

        if (cmd == PTP_PIN_GETFUNC2 && !mem_is_zero(pd.rsv, sizeof(pd.rsv)))
                return -EINVAL;

        if (pd.index >= ops->n_pins)
                return -EINVAL;

        scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &ptp->pincfg_mux)
                pd = ops->pin_config[array_index_nospec(pd.index, ops->n_pins)];

        return copy_to_user(arg, &pd, sizeof(pd)) ? -EFAULT : 0;
}

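/* PTP_PIN_SETFUNC(2): reprogram one pin via ptp_set_pinfunc(). */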
static long ptp_pin_setfunc(struct ptp_clock *ptp, unsigned int cmd, void __user *arg)
{
        struct ptp_clock_info *ops = ptp->info;
        struct ptp_pin_desc pd;
        unsigned int pin_index;

        if (copy_from_user(&pd, arg, sizeof(pd)))
                return -EFAULT;

        if (cmd == PTP_PIN_SETFUNC2 && !mem_is_zero(pd.rsv, sizeof(pd.rsv)))
                return -EINVAL;

        if (pd.index >= ops->n_pins)
                return -EINVAL;

        pin_index = array_index_nospec(pd.index, ops->n_pins);
        scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &ptp->pincfg_mux)
                return ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
}

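/* PTP_MASK_CLEAR_ALL: drop every channel from this queue's event mask. */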
static long ptp_mask_clear_all(struct timestamp_event_queue *tsevq)
{
        bitmap_clear(tsevq->mask, 0, PTP_MAX_CHANNELS);
        return 0;
}

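/* PTP_MASK_EN_SINGLE: add one channel to this queue's event mask. */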
static long ptp_mask_en_single(struct timestamp_event_queue *tsevq, void __user *arg)
{
        unsigned int channel;

        if (copy_from_user(&channel, arg, sizeof(channel)))
                return -EFAULT;
        if (channel >= PTP_MAX_CHANNELS)
                return -EFAULT;
        set_bit(channel, tsevq->mask);
        return 0;
}

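/*
 * Top-level ioctl dispatcher for the PTP character device.  Commands that
 * change hardware state require the descriptor to be open for writing.
 */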
long ptp_ioctl(struct posix_clock_context *pccontext, unsigned int cmd,
               unsigned long arg)
{
        struct ptp_clock *ptp = container_of(pccontext->clk, struct ptp_clock, clock);
        void __user *argptr;

        if (in_compat_syscall() && cmd != PTP_ENABLE_PPS && cmd != PTP_ENABLE_PPS2)
                arg = (unsigned long)compat_ptr(arg);
        argptr = (void __force __user *)arg;

        switch (cmd) {
        case PTP_CLOCK_GETCAPS:
        case PTP_CLOCK_GETCAPS2:
                return ptp_clock_getcaps(ptp, argptr);

        case PTP_EXTTS_REQUEST:
        case PTP_EXTTS_REQUEST2:
                if ((pccontext->fp->f_mode & FMODE_WRITE) == 0)
                        return -EACCES;
                return ptp_extts_request(ptp, cmd, argptr);

        case PTP_PEROUT_REQUEST:
        case PTP_PEROUT_REQUEST2:
                if ((pccontext->fp->f_mode & FMODE_WRITE) == 0)
                        return -EACCES;
                return ptp_perout_request(ptp, cmd, argptr);

        case PTP_ENABLE_PPS:
        case PTP_ENABLE_PPS2:
                if ((pccontext->fp->f_mode & FMODE_WRITE) == 0)
                        return -EACCES;
                return ptp_enable_pps(ptp, !!arg);

        case PTP_SYS_OFFSET_PRECISE:
        case PTP_SYS_OFFSET_PRECISE2:
                return ptp_sys_offset_precise(ptp, argptr);

        case PTP_SYS_OFFSET_EXTENDED:
        case PTP_SYS_OFFSET_EXTENDED2:
                return ptp_sys_offset_extended(ptp, argptr);

        case PTP_SYS_OFFSET:
        case PTP_SYS_OFFSET2:
                return ptp_sys_offset(ptp, argptr);

        case PTP_PIN_GETFUNC:
        case PTP_PIN_GETFUNC2:
                return ptp_pin_getfunc(ptp, cmd, argptr);

        case PTP_PIN_SETFUNC:
        case PTP_PIN_SETFUNC2:
                if ((pccontext->fp->f_mode & FMODE_WRITE) == 0)
                        return -EACCES;
                return ptp_pin_setfunc(ptp, cmd, argptr);

        case PTP_MASK_CLEAR_ALL:
                return ptp_mask_clear_all(pccontext->private_clkdata);

        case PTP_MASK_EN_SINGLE:
                return ptp_mask_en_single(pccontext->private_clkdata, argptr);

        default:
                return -ENOTTY;
        }
}

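/* Poll: report EPOLLIN once this descriptor's event queue is non-empty. */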
__poll_t ptp_poll(struct posix_clock_context *pccontext, struct file *fp,
                  poll_table *wait)
{
        struct ptp_clock *ptp =
                container_of(pccontext->clk, struct ptp_clock, clock);
        struct timestamp_event_queue *queue;

        queue = pccontext->private_clkdata;
        if (!queue)
                return EPOLLERR;

        poll_wait(fp, &ptp->tsev_wq, wait);

        return queue_cnt(queue) ? EPOLLIN : 0;
}

#define EXTTS_BUFSIZE (PTP_BUF_TIMESTAMPS * sizeof(struct ptp_extts_event))

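/*
 * Read external timestamp events from this descriptor's queue.  Blocks
 * until at least one event is queued or the clock becomes defunct, then
 * copies whole ptp_extts_event records to user space.
 */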
ssize_t ptp_read(struct posix_clock_context *pccontext, uint rdflags,
                 char __user *buf, size_t cnt)
{
        struct ptp_clock *ptp = container_of(pccontext->clk, struct ptp_clock, clock);
        struct timestamp_event_queue *queue;
        struct ptp_extts_event *event;
        ssize_t result;

        queue = pccontext->private_clkdata;
        if (!queue)
                return -EINVAL;

        if (cnt % sizeof(*event) != 0)
                return -EINVAL;

        if (cnt > EXTTS_BUFSIZE)
                cnt = EXTTS_BUFSIZE;

        if (wait_event_interruptible(ptp->tsev_wq, ptp->defunct || queue_cnt(queue)))
                return -ERESTARTSYS;

        if (ptp->defunct)
                return -ENODEV;

        event = kmalloc(EXTTS_BUFSIZE, GFP_KERNEL);
        if (!event)
                return -ENOMEM;

        scoped_guard(spinlock_irq, &queue->lock) {
                size_t qcnt = min((size_t)queue_cnt(queue), cnt / sizeof(*event));

                for (size_t i = 0; i < qcnt; i++) {
                        event[i] = queue->buf[queue->head];
                        /* Paired with READ_ONCE() in queue_cnt() */
                        WRITE_ONCE(queue->head, (queue->head + 1) % PTP_MAX_TIMESTAMPS);
                }
                cnt = qcnt * sizeof(*event);
        }

        result = cnt;
        if (copy_to_user(buf, event, cnt))
                result = -EFAULT;

        kfree(event);
        return result;
}