// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021. Huawei Technologies Co., Ltd
 */
#include <linux/kernel.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>

extern struct bpf_struct_ops bpf_bpf_dummy_ops;

/* A common type for test_N with return value in bpf_dummy_ops */
typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...);

struct bpf_dummy_ops_test_args {
	u64 args[MAX_BPF_FUNC_ARGS];
	struct bpf_dummy_ops_state state;
};

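/*
 * Allocate the test args, copy the u64 argument array from the test_run
 * ctx_in buffer and, if args[0] points at a user bpf_dummy_ops_state,
 * copy that state in as well.
 */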
static struct bpf_dummy_ops_test_args *
dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
{
	__u32 size_in;
	struct bpf_dummy_ops_test_args *args;
	void __user *ctx_in;
	void __user *u_state;

	size_in = kattr->test.ctx_size_in;
	if (size_in != sizeof(u64) * nr)
		return ERR_PTR(-EINVAL);

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args)
		return ERR_PTR(-ENOMEM);

	ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	if (copy_from_user(args->args, ctx_in, size_in))
		goto out;

	/* a zero args[0] means the state argument of test_N will be NULL */
	u_state = u64_to_user_ptr(args->args[0]);
	if (u_state && copy_from_user(&args->state, u_state,
				      sizeof(args->state)))
		goto out;

	return args;
out:
	kfree(args);
	return ERR_PTR(-EFAULT);
}

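/* Copy the (possibly updated) state back to the user buffer in args[0]. */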
static int dummy_ops_copy_args(struct bpf_dummy_ops_test_args *args)
{
	void __user *u_state;

	u_state = u64_to_user_ptr(args->args[0]);
	if (u_state && copy_to_user(u_state, &args->state, sizeof(args->state)))
		return -EFAULT;

	return 0;
}

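/*
 * Call the trampoline image as if it were the test_N member itself,
 * passing the state pointer plus the remaining scalar arguments.
 */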
static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args)
{
	dummy_ops_test_ret_fn test = (void *)image;
	struct bpf_dummy_ops_state *state = NULL;

	/* state needs to be NULL if args[0] is 0 */
	if (args->args[0])
		state = &args->state;
	return test(state, args->args[1], args->args[2],
		    args->args[3], args->args[4]);
}

extern const struct bpf_link_ops bpf_struct_ops_link_lops;

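/*
 * Test-run entry point for bpf_dummy_ops programs: build a one-off
 * trampoline around the prog, call it with the user-supplied arguments,
 * then copy the state and return value back to user space.
 */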
int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr)
{
	const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops;
	const struct btf_type *func_proto;
	struct bpf_dummy_ops_test_args *args;
	struct bpf_tramp_links *tlinks;
	struct bpf_tramp_link *link = NULL;
	void *image = NULL;
	unsigned int op_idx;
	int prog_ret;
	int err;

	if (prog->aux->attach_btf_id != st_ops->type_id)
		return -EOPNOTSUPP;

	func_proto = prog->aux->attach_func_proto;
	args = dummy_ops_init_args(kattr, btf_type_vlen(func_proto));
	if (IS_ERR(args))
		return PTR_ERR(args);

	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks) {
		err = -ENOMEM;
		goto out;
	}

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image) {
		err = -ENOMEM;
		goto out;
	}
	set_vm_flush_reset_perms(image);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out;
	}
	/* prog doesn't take ownership of the reference from the caller */
	bpf_prog_inc(prog);
	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_link_lops, prog);

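	/*
	 * For a struct_ops program, expected_attach_type holds the index of
	 * the implemented member, which selects the func_model used to build
	 * the trampoline.
	 */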
	op_idx = prog->expected_attach_type;
	err = bpf_struct_ops_prepare_trampoline(tlinks, link,
						&st_ops->func_models[op_idx],
						image, image + PAGE_SIZE);
	if (err < 0)
		goto out;

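	/* seal the trampoline image (read-only + executable) before calling it */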
	set_memory_rox((long)image, 1);
	prog_ret = dummy_ops_call_op(image, args);

	err = dummy_ops_copy_args(args);
	if (err)
		goto out;
	if (put_user(prog_ret, &uattr->test.retval))
		err = -EFAULT;
out:
	kfree(args);
	bpf_jit_free_exec(image);
	if (link)
		bpf_link_put(&link->link);
	kfree(tlinks);
	return err;
}

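/* No extra BTF setup is needed when the dummy struct_ops type is registered. */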
static int bpf_dummy_init(struct btf *btf)
{
	return 0;
}

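/*
 * Context access is validated like a tracing program: only aligned reads
 * of the function arguments are allowed.
 */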
static bool bpf_dummy_ops_is_valid_access(int off, int size,
					  enum bpf_access_type type,
					  const struct bpf_prog *prog,
					  struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

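/*
 * Only the test_sleepable member may be implemented by a sleepable
 * program; sleepable programs attached to any other member are rejected.
 */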
static int bpf_dummy_ops_check_member(const struct btf_type *t,
				      const struct btf_member *member,
				      const struct bpf_prog *prog)
{
	u32 moff = __btf_member_bit_offset(t, member) / 8;

	switch (moff) {
	case offsetof(struct bpf_dummy_ops, test_sleepable):
		break;
	default:
		if (prog->aux->sleepable)
			return -EINVAL;
	}

	return 0;
}

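/*
 * Only pointer accesses into struct bpf_dummy_ops_state are allowed; the
 * access itself is then validated by the generic btf_struct_access() helper.
 */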
static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
					   const struct bpf_reg_state *reg,
					   int off, int size, enum bpf_access_type atype,
					   u32 *next_btf_id,
					   enum bpf_type_flag *flag)
{
	const struct btf_type *state;
	const struct btf_type *t;
	s32 type_id;
	int err;

	type_id = btf_find_by_name_kind(reg->btf, "bpf_dummy_ops_state",
					BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;

	t = btf_type_by_id(reg->btf, reg->btf_id);
	state = btf_type_by_id(reg->btf, type_id);
	if (t != state) {
		bpf_log(log, "only access to bpf_dummy_ops_state is supported\n");
		return -EACCES;
	}

	err = btf_struct_access(log, reg, off, size, atype, next_btf_id, flag);
	if (err < 0)
		return err;

	return atype == BPF_READ ? err : NOT_INIT;
}

static const struct bpf_verifier_ops bpf_dummy_verifier_ops = {
	.is_valid_access = bpf_dummy_ops_is_valid_access,
	.btf_struct_access = bpf_dummy_ops_btf_struct_access,
};

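/*
 * Initializing members from user space is not supported; dummy ops exist
 * only to be exercised via BPF_PROG_TEST_RUN.
 */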
static int bpf_dummy_init_member(const struct btf_type *t,
				 const struct btf_member *member,
				 void *kdata, const void *udata)
{
	return -EOPNOTSUPP;
}

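/*
 * A dummy ops map can never be registered with the kernel, so there is
 * nothing to undo in the unregister callback either.
 */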
static int bpf_dummy_reg(void *kdata)
{
	return -EOPNOTSUPP;
}

static void bpf_dummy_unreg(void *kdata)
{
}

struct bpf_struct_ops bpf_bpf_dummy_ops = {
	.verifier_ops = &bpf_dummy_verifier_ops,
	.init = bpf_dummy_init,
	.check_member = bpf_dummy_ops_check_member,
	.init_member = bpf_dummy_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.name = "bpf_dummy_ops",
};