1 /* SPDX-License-Identifier: bzip2-1.0.6 */
2 /*
3    This file is part of MemCheck, a heavyweight Valgrind tool for
4    detecting memory errors.
5 
6    Copyright (C) 2000-2017 Julian Seward.  All rights reserved.
7  */
8 
9 #ifndef __MEMCHECK_H
10 #define __MEMCHECK_H
11 
12 /* This file is for inclusion into client (your!) code.
13 
14    You can use these macros to manipulate and query memory permissions
15    inside your own programs.
16 
17    See comment near the top of valgrind.h on how to use them.
18 */
19 
20 #include "valgrind.h"
21 
22 /* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
23    This enum comprises an ABI exported by Valgrind to programs
24    which use client requests.  DO NOT CHANGE THE ORDER OF THESE
25    ENTRIES, NOR DELETE ANY -- add new ones at the end. */
26 typedef
27    enum {
28       VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
29       VG_USERREQ__MAKE_MEM_UNDEFINED,
30       VG_USERREQ__MAKE_MEM_DEFINED,
31       VG_USERREQ__DISCARD,
32       VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
33       VG_USERREQ__CHECK_MEM_IS_DEFINED,
34       VG_USERREQ__DO_LEAK_CHECK,
35       VG_USERREQ__COUNT_LEAKS,
36 
37       VG_USERREQ__GET_VBITS,
38       VG_USERREQ__SET_VBITS,
39 
40       VG_USERREQ__CREATE_BLOCK,
41 
42       VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,
43 
44       /* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
45       VG_USERREQ__COUNT_LEAK_BLOCKS,
46 
47       VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE,
48       VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE,
49 
50       /* This is just for memcheck's internal use - don't use it */
51       _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR
52          = VG_USERREQ_TOOL_BASE('M','C') + 256
53    } Vg_MemCheckClientRequest;
54 
55 /* Client-code macros to manipulate the state of memory. */
56 
57 /* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */
58 #define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len)           \
59     VALGRIND_DO_CLIENT_REQUEST_STMT(                             \
60                             VG_USERREQ__MAKE_MEM_NOACCESS,       \
61                             (_qzz_addr), (_qzz_len), 0, 0, 0)
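
/* An illustrative sketch (not part of the API): a custom pool allocator
   might mark a chunk it takes back as unaddressable, so that any later
   use through a stale pointer is reported.  'Pool', 'pool_free' and
   'return_to_free_list' are hypothetical names.

      void pool_free(Pool* pool, void* chunk, size_t len)
      {
         return_to_free_list(pool, chunk, len);      // hypothetical helper
         VALGRIND_MAKE_MEM_NOACCESS(chunk, len);
      }
*/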
62 
63 /* Similarly, mark memory at _qzz_addr as addressable but undefined
64    for _qzz_len bytes. */
65 #define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len)          \
66     VALGRIND_DO_CLIENT_REQUEST_STMT(                             \
67                             VG_USERREQ__MAKE_MEM_UNDEFINED,      \
68                             (_qzz_addr), (_qzz_len), 0, 0, 0)
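
/* An illustrative sketch: the matching allocation path of such a pool
   could mark recycled chunks as addressable but undefined, so reads of
   uninitialised data are still caught.  'Pool', 'pool_alloc' and
   'take_from_free_list' are hypothetical names.

      void* pool_alloc(Pool* pool, size_t len)
      {
         void* chunk = take_from_free_list(pool, len);  // hypothetical helper
         VALGRIND_MAKE_MEM_UNDEFINED(chunk, len);
         return chunk;
      }
*/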
69 
70 /* Similarly, mark memory at _qzz_addr as addressable and defined
71    for _qzz_len bytes. */
72 #define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len)            \
73     VALGRIND_DO_CLIENT_REQUEST_STMT(                             \
74                             VG_USERREQ__MAKE_MEM_DEFINED,        \
75                             (_qzz_addr), (_qzz_len), 0, 0, 0)
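
/* An illustrative sketch: a buffer filled in by some mechanism Memcheck
   cannot observe (for instance a device writing through shared memory)
   can be declared defined before the program reads it.
   'read_from_device', 'dev', 'buf' and 'len' are hypothetical names.

      read_from_device(dev, buf, len);
      VALGRIND_MAKE_MEM_DEFINED(buf, len);
*/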
76 
77 /* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is
78    not altered: bytes which are addressable are marked as defined,
79    but those which are not addressable are left unchanged. */
80 #define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len)     \
81     VALGRIND_DO_CLIENT_REQUEST_STMT(                                     \
82                             VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
83                             (_qzz_addr), (_qzz_len), 0, 0, 0)
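
/* An illustrative sketch: handy when only part of a range is known to be
   allocated, e.g. after an external routine may have written into a
   window that is only partially addressable.  'window' and 'window_len'
   are hypothetical names.

      VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(window, window_len);
*/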
84 
/* Create a block-description handle.  The description is an ASCII
   string which is included in any messages pertaining to addresses
   within the specified memory range.  Has no other effect on the
   properties of the memory range. */
89 #define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc)	   \
90     VALGRIND_DO_CLIENT_REQUEST_STMT(                               \
91                             VG_USERREQ__CREATE_BLOCK,              \
92                             (_qzz_addr), (_qzz_len), (_qzz_desc),  \
93                             0, 0)
94 
/* Discard a block-description handle.  Returns 1 for an
   invalid handle, 0 for a valid handle. */
97 #define VALGRIND_DISCARD(_qzz_blkindex)                          \
98     VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,      \
99                             VG_USERREQ__DISCARD,                 \
100                             0, (_qzz_blkindex), 0, 0, 0)
101 
102 /* Client-code macros to check the state of memory. */
103 
104 /* Check that memory at _qzz_addr is addressable for _qzz_len bytes.
   If suitable addressability is not established, Valgrind prints an
106    error message and returns the address of the first offending byte.
107    Otherwise it returns zero. */
108 #define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len)      \
109     VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                             \
110                             VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,  \
111                             (_qzz_addr), (_qzz_len), 0, 0, 0)
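
/* An illustrative sketch: confirm that a destination buffer handed to an
   external writer is addressable for its full length.  'dst', 'dst_len'
   and 'external_fill' are hypothetical names.

      VALGRIND_CHECK_MEM_IS_ADDRESSABLE(dst, dst_len);
      external_fill(dst, dst_len);
*/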
112 
113 /* Check that memory at _qzz_addr is addressable and defined for
   _qzz_len bytes.  If suitable addressability and definedness are not
115    established, Valgrind prints an error message and returns the
116    address of the first offending byte.  Otherwise it returns zero. */
117 #define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len)        \
118     VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                           \
119                             VG_USERREQ__CHECK_MEM_IS_DEFINED,    \
120                             (_qzz_addr), (_qzz_len), 0, 0, 0)
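
/* An illustrative sketch: before writing a structure out byte-for-byte,
   confirm every byte is defined, so uninitialised padding or fields are
   reported here rather than at some harder-to-trace later point.
   'rec' and 'fp' are hypothetical names.

      VALGRIND_CHECK_MEM_IS_DEFINED(&rec, sizeof rec);
      fwrite(&rec, sizeof rec, 1, fp);
*/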
121 
/* Use this macro to force the definedness and addressability of an
   lvalue to be checked.  If suitable addressability and definedness
124    are not established, Valgrind prints an error message and returns
125    the address of the first offending byte.  Otherwise it returns
126    zero. */
127 #define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue)                \
128    VALGRIND_CHECK_MEM_IS_DEFINED(                                \
129       (volatile unsigned char *)&(__lvalue),                     \
130                       (unsigned long)(sizeof (__lvalue)))
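
/* An illustrative sketch: check a single variable right where its value
   is about to steer control flow.  'status' and 'handle_error' are
   hypothetical names.

      VALGRIND_CHECK_VALUE_IS_DEFINED(status);
      if (status != 0)
         handle_error(status);
*/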
131 
132 /* Do a full memory leak check (like --leak-check=full) mid-execution. */
133 #define VALGRIND_DO_LEAK_CHECK                                   \
134     VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK,   \
135                                     0, 0, 0, 0, 0)
136 
/* Same as VALGRIND_DO_LEAK_CHECK but only showing the entries for
   which there was an increase in leaked bytes or in the number of
   leaked blocks since the previous leak search. */
140 #define VALGRIND_DO_ADDED_LEAK_CHECK                            \
141     VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK,  \
142                                     0, 1, 0, 0, 0)
143 
/* Same as VALGRIND_DO_ADDED_LEAK_CHECK but showing entries whose
   leaked bytes/blocks have increased or decreased since the previous
   leak search. */
147 #define VALGRIND_DO_CHANGED_LEAK_CHECK                          \
148     VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK,  \
149                                     0, 2, 0, 0, 0)
150 
151 /* Do a summary memory leak check (like --leak-check=summary) mid-execution. */
152 #define VALGRIND_DO_QUICK_LEAK_CHECK                             \
153     VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK,   \
154                                     1, 0, 0, 0, 0)
155 
/* Return the number of leaked, dubious, reachable and suppressed bytes
   found by all previous leak checks.  The four arguments must be lvalues. */
158 #define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed)     \
159    /* For safety on 64-bit platforms we assign the results to private
160       unsigned long variables, then assign these to the lvalues the user
161       specified, which works no matter what type 'leaked', 'dubious', etc
162       are.  We also initialise '_qzz_leaked', etc because
163       VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
164       defined. */                                                        \
165    {                                                                     \
166     unsigned long _qzz_leaked    = 0, _qzz_dubious    = 0;               \
167     unsigned long _qzz_reachable = 0, _qzz_suppressed = 0;               \
168     VALGRIND_DO_CLIENT_REQUEST_STMT(                                     \
169                                VG_USERREQ__COUNT_LEAKS,                  \
170                                &_qzz_leaked, &_qzz_dubious,              \
171                                &_qzz_reachable, &_qzz_suppressed, 0);    \
172     leaked     = _qzz_leaked;                                            \
173     dubious    = _qzz_dubious;                                           \
174     reachable  = _qzz_reachable;                                         \
175     suppressed = _qzz_suppressed;                                        \
176    }
177 
/* Return the number of leaked, dubious, reachable and suppressed blocks
   found by all previous leak checks.  The four arguments must be lvalues. */
180 #define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \
181    /* For safety on 64-bit platforms we assign the results to private
182       unsigned long variables, then assign these to the lvalues the user
183       specified, which works no matter what type 'leaked', 'dubious', etc
184       are.  We also initialise '_qzz_leaked', etc because
      VG_USERREQ__COUNT_LEAK_BLOCKS doesn't mark the values returned as
186       defined. */                                                        \
187    {                                                                     \
188     unsigned long _qzz_leaked    = 0, _qzz_dubious    = 0;               \
189     unsigned long _qzz_reachable = 0, _qzz_suppressed = 0;               \
190     VALGRIND_DO_CLIENT_REQUEST_STMT(                                     \
191                                VG_USERREQ__COUNT_LEAK_BLOCKS,            \
192                                &_qzz_leaked, &_qzz_dubious,              \
193                                &_qzz_reachable, &_qzz_suppressed, 0);    \
194     leaked     = _qzz_leaked;                                            \
195     dubious    = _qzz_dubious;                                           \
196     reachable  = _qzz_reachable;                                         \
197     suppressed = _qzz_suppressed;                                        \
198    }
199 
/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it
   into the provided zzvbits array.  Return values:
      0   if not running on Valgrind
      1   success
      2   [previously indicated unaligned arrays;  these are now allowed]
      3   if any parts of zza/zzvbits are not addressable.
   The metadata is not copied in cases 0, 2 or 3 so it should be
   impossible to segfault your system by using this call.
*/
209 #define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes)                \
210     (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                \
211                                     VG_USERREQ__GET_VBITS,      \
212                                     (const char*)(zza),         \
213                                     (char*)(zzvbits),           \
214                                     (zznbytes), 0, 0)
215 
/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it
   from the provided zzvbits array.  Return values:
      0   if not running on Valgrind
      1   success
      2   [previously indicated unaligned arrays;  these are now allowed]
      3   if any parts of zza/zzvbits are not addressable.
   The metadata is not copied in cases 0, 2 or 3 so it should be
   impossible to segfault your system by using this call.
*/
225 #define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes)                \
226     (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0,                \
227                                     VG_USERREQ__SET_VBITS,      \
228                                     (const char*)(zza),         \
229                                     (const char*)(zzvbits),     \
230                                     (zznbytes), 0, 0 )
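
/* An illustrative sketch: save a buffer's definedness metadata, copy the
   buffer through a routine Memcheck cannot track, then restore the
   metadata on the destination.  This assumes the zzvbits array holds one
   metadata byte per data byte.  'src', 'dst', 'len' and 'opaque_copy'
   are hypothetical names.

      unsigned char* vbits = malloc(len);            // needs <stdlib.h>
      if (VALGRIND_GET_VBITS(src, vbits, len) == 1) {
         opaque_copy(dst, src, len);
         VALGRIND_SET_VBITS(dst, vbits, len);
      }
      free(vbits);
*/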
231 
232 /* Disable and re-enable reporting of addressing errors in the
233    specified address range. */
234 #define VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \
235     VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,    \
236        VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE,      \
237        (_qzz_addr), (_qzz_len), 0, 0, 0)
238 
239 #define VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \
240     VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */,    \
241        VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE,       \
242        (_qzz_addr), (_qzz_len), 0, 0, 0)
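
/* An illustrative sketch: silence Memcheck for a deliberate, bounded
   overread (for example a word-at-a-time scanner that may read a few
   bytes past the end of a buffer), and only for the duration of that
   scan.  'buf', 'buf_len' and 'fast_word_scan' are hypothetical names.

      VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(buf + buf_len, 16);
      result = fast_word_scan(buf, buf_len);
      VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(buf + buf_len, 16);
*/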
243 
#endif /* __MEMCHECK_H */
245 
246