// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/atomic.h>
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
#include "coresight-etm-perf.h"

static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle);

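/*
 * Program the TMC for circular buffer (ETB) operation: enable the
 * formatter and trigger/flush events, set the trigger counter and
 * start trace capture.
 */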
static int __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = 0;

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	rc = tmc_wait_for_tmcready(drvdata);
	if (rc) {
		dev_err(&drvdata->csdev->dev,
			"Failed to enable: TMC not ready\n");
		CS_LOCK(drvdata->base);
		return rc;
	}

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
	return rc;
}

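/* Claim the device for self-hosted use before programming the hardware. */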
static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;

	rc = __tmc_etb_enable_hw(drvdata);
	if (rc)
		coresight_disclaim_device(drvdata->csdev);
	return rc;
}

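/*
 * Drain the trace RAM into drvdata->buf, one word at a time, through
 * the RAM Read Data register. The TMC returns 0xFFFFFFFF once there is
 * no more data to read. If the buffer wrapped around, a barrier packet
 * is inserted at the start so decoders know trace data was lost.
 */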
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
	char *bufp;
	u32 read_data, lost;

	/* Check if the buffer wrapped around. */
	lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
	bufp = drvdata->buf;
	drvdata->len = 0;
	while (1) {
		read_data = readl_relaxed(drvdata->base + TMC_RRD);
		if (read_data == 0xFFFFFFFF)
			break;
		memcpy(bufp, &read_data, 4);
		bufp += 4;
		drvdata->len += 4;
	}

	if (lost)
		coresight_insert_barrier_packet(drvdata->buf);
	return;
}

static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (drvdata->mode == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	__tmc_etb_disable_hw(drvdata);
	coresight_disclaim_device(drvdata->csdev);
}

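/*
 * Program the TMC for hardware FIFO (ETF link) operation: the trace RAM
 * acts as a FIFO and formatted trace data is passed on to the output
 * port rather than kept for later collection.
 */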
static int __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = 0;

	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	rc = tmc_wait_for_tmcready(drvdata);
	if (rc) {
		dev_err(&drvdata->csdev->dev,
			"Failed to enable: TMC is not ready\n");
		CS_LOCK(drvdata->base);
		return rc;
	}

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
	return rc;
}

static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	int rc = coresight_claim_device(drvdata->csdev);

	if (rc)
		return rc;

	rc = __tmc_etf_enable_hw(drvdata);
	if (rc)
		coresight_disclaim_device(drvdata->csdev);
	return rc;
}

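/* Stop the FIFO, disable the TMC and release the device claim. */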
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	struct coresight_device *csdev = drvdata->csdev;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);
	coresight_disclaim_device_unlocked(csdev);
	CS_LOCK(drvdata->base);
}

/*
 * Return the available trace data in the buffer from @pos, with
 * a maximum limit of @len, updating the @bufpp on where to
 * find it.
 */
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
				loff_t pos, size_t len, char **bufpp)
{
	ssize_t actual = len;

	/* Adjust the len to available size @pos */
	if (pos + actual > drvdata->len)
		actual = drvdata->len - pos;
	if (actual > 0)
		*bufpp = drvdata->buf + pos;
	return actual;
}

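/*
 * Enable the ETB/ETF as a sink from sysFS. A trace buffer is allocated
 * on first use; additional writers simply take another reference on the
 * already enabled sink.
 */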
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
	int ret = 0;
	bool used = false;
	char *buf = NULL;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * If we don't have a buffer, release the lock and allocate memory.
	 * Otherwise keep the lock and move along.
	 */
	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (!drvdata->buf) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);

		/* Allocate the memory here while outside of the spinlock */
		buf = kzalloc(drvdata->size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/* Let's try again */
		spin_lock_irqsave(&drvdata->spinlock, flags);
	}

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/*
	 * In sysFS mode we can have multiple writers per sink. Since this
	 * sink is already enabled no memory is needed and the HW need not be
	 * touched.
	 */
	if (drvdata->mode == CS_MODE_SYSFS) {
		atomic_inc(csdev->refcnt);
		goto out;
	}

	/*
	 * If drvdata::buf isn't NULL, memory was allocated for a previous
	 * trace run but wasn't read. If so simply zero-out the memory.
	 * Otherwise use the memory allocated above.
	 *
	 * The memory is freed when users read the buffer using the
	 * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etf() for
	 * details.
	 */
	if (drvdata->buf) {
		memset(drvdata->buf, 0, drvdata->size);
	} else {
		used = true;
		drvdata->buf = buf;
	}

	ret = tmc_etb_enable_hw(drvdata);
	if (!ret) {
		drvdata->mode = CS_MODE_SYSFS;
		atomic_inc(csdev->refcnt);
	} else {
		/* Free up the buffer if we failed to enable */
		used = false;
	}
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/* Free memory outside the spinlock if need be */
	if (!used)
		kfree(buf);

	return ret;
}

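/*
 * Enable the ETB/ETF as a sink for a perf session. The sink can only be
 * shared by events owned by the same process and cannot be used while
 * it is operated from sysFS or while its buffer is being read.
 */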
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
	int ret = 0;
	pid_t pid;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	struct perf_output_handle *handle = data;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	do {
		ret = -EINVAL;
		if (drvdata->reading)
			break;
		/*
		 * No need to continue if the ETB/ETF is already operated
		 * from sysFS.
		 */
		if (drvdata->mode == CS_MODE_SYSFS) {
			ret = -EBUSY;
			break;
		}

		/* Get a handle on the pid of the process to monitor */
		pid = buf->pid;

		if (drvdata->pid != -1 && drvdata->pid != pid) {
			ret = -EBUSY;
			break;
		}

		ret = tmc_set_etf_buffer(csdev, handle);
		if (ret)
			break;

		/*
		 * No HW configuration is needed if the sink is already in
		 * use for this session.
		 */
		if (drvdata->pid == pid) {
			atomic_inc(csdev->refcnt);
			break;
		}

		ret = tmc_etb_enable_hw(drvdata);
		if (!ret) {
			/* Associate with monitored process. */
			drvdata->pid = pid;
			drvdata->mode = CS_MODE_PERF;
			atomic_inc(csdev->refcnt);
		}
	} while (0);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

static int tmc_enable_etf_sink(struct coresight_device *csdev,
			       u32 mode, void *data)
{
	int ret;

	switch (mode) {
	case CS_MODE_SYSFS:
		ret = tmc_enable_etf_sink_sysfs(csdev);
		break;
	case CS_MODE_PERF:
		ret = tmc_enable_etf_sink_perf(csdev, data);
		break;
	/* We shouldn't be here */
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
	return 0;
}

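/*
 * Disable the sink once the last reference is dropped and dissociate it
 * from the monitored process. Fails with -EBUSY while the buffer is
 * being read or while other users still hold a reference.
 */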
static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	if (atomic_dec_return(csdev->refcnt)) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	/* Complain if we (somehow) got out of sync */
	WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
	tmc_etb_disable_hw(drvdata);
	/* Dissociate from monitored process. */
	drvdata->pid = -1;
	drvdata->mode = CS_MODE_DISABLED;

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
	return 0;
}

static int tmc_enable_etf_link(struct coresight_device *csdev,
			       int inport, int outport)
{
	int ret = 0;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool first_enable = false;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	if (atomic_read(&csdev->refcnt[0]) == 0) {
		ret = tmc_etf_enable_hw(drvdata);
		if (!ret) {
			drvdata->mode = CS_MODE_SYSFS;
			first_enable = true;
		}
	}
	if (!ret)
		atomic_inc(&csdev->refcnt[0]);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (first_enable)
		dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
	return ret;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
				 int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	bool last_disable = false;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	if (atomic_dec_return(&csdev->refcnt[0]) == 0) {
		tmc_etf_disable_hw(drvdata);
		drvdata->mode = CS_MODE_DISABLED;
		last_disable = true;
	}
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	if (last_disable)
		dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}

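/*
 * Allocate the cs_buffers structure used to track the perf AUX ring
 * buffer pages this sink copies trace data into.
 */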
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
				  struct perf_event *event, void **pages,
				  int nr_pages, bool overwrite)
{
	int node;
	struct cs_buffers *buf;

	node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

	/* Allocate memory structure for interaction with Perf */
	buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->pid = task_pid_nr(event->owner);
	buf->snapshot = overwrite;
	buf->nr_pages = nr_pages;
	buf->data_pages = pages;

	return buf;
}

static void tmc_free_etf_buffer(void *config)
{
	struct cs_buffers *buf = config;

	kfree(buf);
}

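/*
 * Derive the current page and page offset from the AUX handle's head so
 * that new trace data is written at the right place in the ring buffer.
 */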
static int tmc_set_etf_buffer(struct coresight_device *csdev,
			      struct perf_output_handle *handle)
{
	int ret = 0;
	unsigned long head;
	struct cs_buffers *buf = etm_perf_sink_config(handle);

	if (!buf)
		return -EINVAL;

	/* wrap head around to the amount of space we have */
	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	/* find the page to write to */
	buf->cur = head / PAGE_SIZE;

	/* and offset within that page */
	buf->offset = head % PAGE_SIZE;

	local_set(&buf->data_size, 0);

	return ret;
}

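/*
 * Stop the TMC and copy whatever landed in its trace RAM into the perf
 * AUX pages. Returns the number of bytes made available to perf.
 */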
static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
					   struct perf_output_handle *handle,
					   void *sink_config)
{
	bool lost = false;
	int i, cur;
	const u32 *barrier;
	u32 *buf_ptr;
	u64 read_ptr, write_ptr;
	u32 status;
	unsigned long offset, to_read = 0, flags;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return 0;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
		return 0;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Don't do anything if another tracer is using this sink */
	if (atomic_read(csdev->refcnt) != 1)
		goto out;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	read_ptr = tmc_read_rrp(drvdata);
	write_ptr = tmc_read_rwp(drvdata);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred. If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		lost = true;
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size). If so advance the RRP so that we
	 * get the latest trace data. In snapshot mode none of that matters
	 * since we are expected to clobber stale data in favour of the latest
	 * traces.
	 */
	if (!buf->snapshot && to_read > handle->size) {
		u32 mask = tmc_get_memwidth_mask(drvdata);

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained in function tmc_get_memwidth_mask().
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		tmc_write_rrp(drvdata, read_ptr);
		lost = true;
	}

	/*
	 * Don't set the TRUNCATED flag in snapshot mode because 1) the
	 * captured buffer is expected to be truncated and 2) a full buffer
	 * prevents the event from being re-enabled by the perf core,
	 * resulting in stale data being sent to user space.
	 */
	if (!buf->snapshot && lost)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	cur = buf->cur;
	offset = buf->offset;
	barrier = coresight_barrier_pkt;

	/* Read the trace data from the TMC, one 32-bit word at a time */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		if (lost && i < CORESIGHT_BARRIER_PKT_SIZE) {
			*buf_ptr = *barrier;
			barrier++;
		}

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode we simply increment the head by the number of bytes
	 * that were written. User space will figure out how many bytes to get
	 * from the AUX buffer based on the position of the head.
	 */
	if (buf->snapshot)
		handle->head += to_read;

	/*
	 * CS_LOCK() contains mb() so it can ensure visibility of the AUX trace
	 * data before the aux_head is updated via perf_aux_output_end(), which
	 * is expected by the perf ring buffer.
	 */
	CS_LOCK(drvdata->base);
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return to_read;
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable = tmc_enable_etf_sink,
	.disable = tmc_disable_etf_sink,
	.alloc_buffer = tmc_alloc_etf_buffer,
	.free_buffer = tmc_free_etf_buffer,
	.update_buffer = tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable = tmc_enable_etf_link,
	.disable = tmc_disable_etf_link,
};

const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops = &tmc_etf_sink_ops,
	.link_ops = &tmc_etf_link_ops,
};

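/*
 * Prepare the ETB/ETF for reading through the /dev/xyz.{etf|etb}
 * interface: if a sysFS trace session is running, stop the TMC and dump
 * its trace RAM to memory, then mark the device as being read.
 */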
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
	enum tmc_mode mode;
	int ret = 0;
	unsigned long flags;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	if (drvdata->reading) {
		ret = -EBUSY;
		goto out;
	}

	/* Don't interfere if operated from Perf */
	if (drvdata->mode == CS_MODE_PERF) {
		ret = -EINVAL;
		goto out;
	}

	/* If drvdata::buf is NULL the trace data has been read already */
	if (drvdata->buf == NULL) {
		ret = -EINVAL;
		goto out;
	}

	/* Disable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			ret = -EINVAL;
			goto out;
		}
		__tmc_etb_disable_hw(drvdata);
	}

	drvdata->reading = true;
out:
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	return ret;
}

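/*
 * Undo tmc_read_prepare_etb() once the buffer has been read: re-enable
 * the TMC with a cleared buffer if a sysFS session is still active,
 * otherwise free the trace buffer.
 */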
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
	char *buf = NULL;
	enum tmc_mode mode;
	unsigned long flags;
	int rc = 0;

	/* config types are set at boot time and never change */
	if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
			 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
		return -EINVAL;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	/* Re-enable the TMC if need be */
	if (drvdata->mode == CS_MODE_SYSFS) {
		/* There is no point in reading a TMC in HW FIFO mode */
		mode = readl_relaxed(drvdata->base + TMC_MODE);
		if (mode != TMC_MODE_CIRCULAR_BUFFER) {
			spin_unlock_irqrestore(&drvdata->spinlock, flags);
			return -EINVAL;
		}
		/*
		 * The trace run will continue with the same allocated trace
		 * buffer. As such zero-out the buffer so that we don't end
		 * up with stale data.
		 *
		 * Since the tracer is still enabled drvdata::buf
		 * can't be NULL.
		 */
		memset(drvdata->buf, 0, drvdata->size);
		rc = __tmc_etb_enable_hw(drvdata);
		if (rc) {
			spin_unlock_irqrestore(&drvdata->spinlock, flags);
			return rc;
		}
	} else {
		/*
		 * The ETB/ETF is not tracing and the buffer was just read.
		 * As such prepare to free the trace buffer.
		 */
		buf = drvdata->buf;
		drvdata->buf = NULL;
	}

	drvdata->reading = false;
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	/*
	 * Free allocated memory outside of the spinlock. There is no need
	 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
	 */
	kfree(buf);

	return 0;
}