1 // SPDX-License-Identifier: GPL-2.0-or-later
2 #include <linux/export.h>
3 #include <linux/ref_tracker.h>
4 #include <linux/slab.h>
5 #include <linux/stacktrace.h>
6 #include <linux/stackdepot.h>
7
8 #define REF_TRACKER_STACK_ENTRIES 16
9
/*
 * One tracked reference. A tracker lives on dir->list while the reference
 * is held, and is moved to dir->quarantine by ref_tracker_free() so a
 * later double release can still be reported with both stacks.
 */
struct ref_tracker {
	struct list_head head;	/* anchor into dir->list or dir->quarantine */
	bool dead;		/* set on release; guards double-free detection */
	depot_stack_handle_t alloc_stack_handle;	/* stack captured at alloc */
	depot_stack_handle_t free_stack_handle;		/* stack captured at free */
};
16
/*
 * Tear down a tracker directory: reclaim all quarantined (already released)
 * trackers, then report and free every still-live tracker as a leak.
 * Should be the final operation on @dir — dir->dead only arms WARNs in
 * later alloc/free calls, it does not block them.
 */
void ref_tracker_dir_exit(struct ref_tracker_dir *dir)
{
	struct ref_tracker *tracker, *n;
	unsigned long flags;
	bool leak = false;

	dir->dead = true;
	spin_lock_irqsave(&dir->lock, flags);
	/* Quarantined trackers were properly released; just free them. */
	list_for_each_entry_safe(tracker, n, &dir->quarantine, head) {
		list_del(&tracker->head);
		kfree(tracker);
		dir->quarantine_avail++;
	}
	/*
	 * Anything still on dir->list was never released: print the
	 * allocation stack (when one was recorded) and flag the leak.
	 */
	list_for_each_entry_safe(tracker, n, &dir->list, head) {
		pr_err("leaked reference.\n");
		if (tracker->alloc_stack_handle)
			stack_depot_print(tracker->alloc_stack_handle);
		leak = true;
		list_del(&tracker->head);
		kfree(tracker);
	}
	spin_unlock_irqrestore(&dir->lock, flags);
	WARN_ON_ONCE(leak);
	/*
	 * Both counters are expected back at 1 here; anything else means
	 * unbalanced trackerless alloc/free calls (see ref_tracker_alloc()
	 * and ref_tracker_free() with a NULL/failed trackerp).
	 */
	WARN_ON_ONCE(refcount_read(&dir->untracked) != 1);
	WARN_ON_ONCE(refcount_read(&dir->no_tracker) != 1);
}
EXPORT_SYMBOL(ref_tracker_dir_exit);
44
ref_tracker_dir_print(struct ref_tracker_dir * dir,unsigned int display_limit)45 void ref_tracker_dir_print(struct ref_tracker_dir *dir,
46 unsigned int display_limit)
47 {
48 struct ref_tracker *tracker;
49 unsigned long flags;
50 unsigned int i = 0;
51
52 spin_lock_irqsave(&dir->lock, flags);
53 list_for_each_entry(tracker, &dir->list, head) {
54 if (i < display_limit) {
55 pr_err("leaked reference.\n");
56 if (tracker->alloc_stack_handle)
57 stack_depot_print(tracker->alloc_stack_handle);
58 i++;
59 } else {
60 break;
61 }
62 }
63 spin_unlock_irqrestore(&dir->lock, flags);
64 }
65 EXPORT_SYMBOL(ref_tracker_dir_print);
66
/*
 * Register one reference on @dir, recording the caller's stack trace.
 * With a NULL @trackerp the reference is merely counted (no tracker is
 * allocated). On allocation failure *trackerp is set to NULL, the
 * reference is counted as untracked and -ENOMEM is returned; otherwise
 * *trackerp receives the new tracker and 0 is returned.
 */
int ref_tracker_alloc(struct ref_tracker_dir *dir,
		      struct ref_tracker **trackerp,
		      gfp_t gfp)
{
	unsigned long stack[REF_TRACKER_STACK_ENTRIES];
	struct ref_tracker *tracker;
	gfp_t alloc_gfp = gfp;
	unsigned long flags;
	unsigned int depth;

	WARN_ON_ONCE(dir->dead);

	/* Caller opted out of per-reference tracking: just count it. */
	if (!trackerp) {
		refcount_inc(&dir->no_tracker);
		return 0;
	}

	/* When the caller may sleep, insist the tracker allocation succeeds. */
	if (gfp & __GFP_DIRECT_RECLAIM)
		alloc_gfp |= __GFP_NOFAIL;

	tracker = kzalloc(sizeof(*tracker), alloc_gfp);
	*trackerp = tracker;
	if (unlikely(!tracker)) {
		pr_err_once("memory allocation failure, unreliable refcount tracker.\n");
		refcount_inc(&dir->untracked);
		return -ENOMEM;
	}

	/* Skip one frame so the stack starts at our caller. */
	depth = stack_trace_save(stack, ARRAY_SIZE(stack), 1);
	tracker->alloc_stack_handle = stack_depot_save(stack, depth, gfp);

	spin_lock_irqsave(&dir->lock, flags);
	list_add(&tracker->head, &dir->list);
	spin_unlock_irqrestore(&dir->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_alloc);
100
/*
 * Release a reference previously registered with ref_tracker_alloc().
 * Released trackers are parked on dir->quarantine (evicting the oldest
 * entry once dir->quarantine_avail is exhausted) so a second release of
 * the same tracker can be reported with both the alloc and free stacks.
 * Returns 0 on success, -EEXIST when *trackerp is NULL (the matching
 * alloc had failed), -EINVAL on a detected double release.
 */
int ref_tracker_free(struct ref_tracker_dir *dir,
		     struct ref_tracker **trackerp)
{
	unsigned long entries[REF_TRACKER_STACK_ENTRIES];
	depot_stack_handle_t stack_handle;
	struct ref_tracker *tracker;
	unsigned int nr_entries;
	unsigned long flags;

	WARN_ON_ONCE(dir->dead);

	if (!trackerp) {
		/* Trackerless reference: only the counter was bumped. */
		refcount_dec(&dir->no_tracker);
		return 0;
	}
	tracker = *trackerp;
	if (!tracker) {
		/* ref_tracker_alloc() failed for this reference. */
		refcount_dec(&dir->untracked);
		return -EEXIST;
	}
	/*
	 * Capture the release stack before taking the lock; GFP_ATOMIC
	 * keeps stack_depot_save() from sleeping.
	 */
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	stack_handle = stack_depot_save(entries, nr_entries, GFP_ATOMIC);

	spin_lock_irqsave(&dir->lock, flags);
	if (tracker->dead) {
		/* Double release: report where it was allocated and first freed. */
		pr_err("reference already released.\n");
		if (tracker->alloc_stack_handle) {
			pr_err("allocated in:\n");
			stack_depot_print(tracker->alloc_stack_handle);
		}
		if (tracker->free_stack_handle) {
			pr_err("freed in:\n");
			stack_depot_print(tracker->free_stack_handle);
		}
		spin_unlock_irqrestore(&dir->lock, flags);
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	tracker->dead = true;

	tracker->free_stack_handle = stack_handle;

	/*
	 * Park the tracker on the quarantine list. If the quarantine is
	 * full, evict its oldest entry and free that one instead; otherwise
	 * consume one quarantine slot and free nothing.
	 */
	list_move_tail(&tracker->head, &dir->quarantine);
	if (!dir->quarantine_avail) {
		tracker = list_first_entry(&dir->quarantine, struct ref_tracker, head);
		list_del(&tracker->head);
	} else {
		dir->quarantine_avail--;
		tracker = NULL;
	}
	spin_unlock_irqrestore(&dir->lock, flags);

	/* kfree(NULL) is a no-op when nothing was evicted above. */
	kfree(tracker);
	return 0;
}
EXPORT_SYMBOL_GPL(ref_tracker_free);
157