// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>

#define NLMDBG_FACILITY		NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT	(5*HZ)
#define NLMCLNT_POLL_TIMEOUT	(30*HZ)
#define NLMCLNT_MAX_RETRIES	3

static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int	nlm_stat_to_errno(__be32 stat);
static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static int	nlmclnt_cancel(struct nlm_host *, int , struct file_lock *);

static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;

/*
 * Cookie counter for NLM requests
 */
static atomic_t	nlm_cookie = ATOMIC_INIT(0x1234);

void nlmclnt_next_cookie(struct nlm_cookie *c)
{
	u32 cookie = atomic_inc_return(&nlm_cookie);

	memcpy(c->data, &cookie, 4);
	c->len=4;
}

static struct nlm_lockowner *
nlmclnt_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

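/*
 * Drop a reference to a lockowner. On the final put, unhash it from the
 * host's h_lockowners list, release the host and free the structure.
 */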
static void nlmclnt_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmclnt_release_host(lockowner->host);
	kfree(lockowner);
}

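/*
 * Check whether the given NLM pid (svid) is already in use by a lockowner
 * on this host. Called with host->h_lock held.
 */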
static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid == pid)
			return -EBUSY;
	}
	return 0;
}

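/*
 * Allocate an NLM pid that is not yet in use by any lockowner on this
 * host. Called with host->h_lock held.
 */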
static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
	uint32_t res;
	do {
		res = host->h_pidcount++;
	} while (nlm_pidbusy(host, res) < 0);
	return res;
}

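/*
 * Look up the lockowner matching @owner under host->h_lock, taking a
 * reference on it if found.
 */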
static struct nlm_lockowner *__nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->owner != owner)
			continue;
		return nlmclnt_get_lockowner(lockowner);
	}
	return NULL;
}

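/*
 * Find the lockowner for @owner, or allocate and hash a new one. The
 * allocation is done with h_lock dropped, so the lookup is repeated
 * afterwards to catch a racing insertion.
 */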
static struct nlm_lockowner *nlmclnt_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlmclnt_find_lockowner(host, owner);
	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlmclnt_find_lockowner(host, owner);
		if (res == NULL && new != NULL) {
			res = new;
			refcount_set(&new->count, 1);
			new->owner = owner;
			new->pid = __nlm_alloc_pid(host);
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}
	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_args *argp = &req->a_args;
	struct nlm_lock *lock = &argp->lock;
	char *nodename = req->a_host->h_rpcclnt->cl_nodename;

	nlmclnt_next_cookie(&argp->cookie);
	memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh));
	lock->caller = nodename;
	lock->oh.data = req->a_owner;
	lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
				(unsigned int)fl->fl_u.nfs_fl.owner->pid,
				nodename);
	lock->svid = fl->fl_u.nfs_fl.owner->pid;
	lock->fl.fl_start = fl->fl_start;
	lock->fl.fl_end = fl->fl_end;
	lock->fl.fl_type = fl->fl_type;
}

static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
	WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL);
}

/**
 * nlmclnt_proc - Perform a single client-side lock request
 * @host: address of a valid nlm_host context representing the NLM server
 * @cmd: fcntl-style file lock operation to perform
 * @fl: address of arguments for the lock operation
 * @data: address of data to be sent to callback operations
 *
 */
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data)
{
	struct nlm_rqst *call;
	int status;
	const struct nlmclnt_operations *nlmclnt_ops = host->h_nlmclnt_ops;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return -ENOMEM;

	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_alloc_call)
		nlmclnt_ops->nlmclnt_alloc_call(data);

	nlmclnt_locks_init_private(fl, host);
	if (!fl->fl_u.nfs_fl.owner) {
		/* lockowner allocation has failed */
		nlmclnt_release_call(call);
		return -ENOMEM;
	}
	/* Set up the argument struct */
	nlmclnt_setlockargs(call, fl);
	call->a_callback_data = data;

	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
		if (fl->fl_type != F_UNLCK) {
			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
			status = nlmclnt_lock(call, fl);
		} else
			status = nlmclnt_unlock(call, fl);
	} else if (IS_GETLK(cmd))
		status = nlmclnt_test(call, fl);
	else
		status = -EINVAL;
	fl->fl_ops->fl_release_private(fl);
	fl->fl_ops = NULL;

	dprintk("lockd: clnt proc returns %d\n", status);
	return status;
}
EXPORT_SYMBOL_GPL(nlmclnt_proc);

/*
 * Allocate an NLM RPC call struct
 */
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
	struct nlm_rqst *call;

	for(;;) {
		call = kzalloc(sizeof(*call), GFP_KERNEL);
		if (call != NULL) {
			refcount_set(&call->a_count, 1);
			locks_init_lock(&call->a_args.lock.fl);
			locks_init_lock(&call->a_res.lock.fl);
			call->a_host = nlm_get_host(host);
			return call;
		}
		if (signalled())
			break;
		printk("nlm_alloc_call: failed, waiting for memory\n");
		schedule_timeout_interruptible(5*HZ);
	}
	return NULL;
}

void nlmclnt_release_call(struct nlm_rqst *call)
{
	const struct nlmclnt_operations *nlmclnt_ops = call->a_host->h_nlmclnt_ops;

	if (!refcount_dec_and_test(&call->a_count))
		return;
	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_release_call)
		nlmclnt_ops->nlmclnt_release_call(call->a_callback_data);
	nlmclnt_release_host(call->a_host);
	nlmclnt_release_lockargs(call);
	kfree(call);
}

static void nlmclnt_rpc_release(void *data)
{
	nlmclnt_release_call(data);
}

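/*
 * Back off for up to NLMCLNT_GRACE_WAIT while the server is in its grace
 * period. Returns 0 after an undisturbed wait, or -EINTR if a signal is
 * pending.
 */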
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signalled ()) {
		schedule_timeout(NLMCLNT_GRACE_WAIT);
		try_to_freeze();
		if (!signalled ())
			status = 0;
	}
	finish_wait(queue, &wait);
	return status;
}

/*
 * Generic NLM call
 */
static int
nlmclnt_call(const struct cred *cred, struct nlm_rqst *req, u32 proc)
{
	struct nlm_host *host = req->a_host;
	struct rpc_clnt *clnt;
	struct nlm_args *argp = &req->a_args;
	struct nlm_res *resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp = argp,
		.rpc_resp = resp,
		.rpc_cred = cred,
	};
	int status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	do {
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled () ? -EINTR : status;
			default:
				break;
			}
			break;
		} else
		if (resp->status == nlm_lck_denied_grace_period) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
					"lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n",
				ntohl(resp->status));
			return 0; /* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}

/*
 * Generic NLM call, async version.
 */
static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct nlm_host *host = req->a_host;
	struct rpc_clnt *clnt;
	struct rpc_task_setup task_setup_data = {
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = req,
		.flags = RPC_TASK_ASYNC,
	};

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	clnt = nlm_bind_host(host);
	if (clnt == NULL)
		goto out_err;
	msg->rpc_proc = &clnt->cl_procinfo[proc];
	task_setup_data.rpc_client = clnt;

	/* bootstrap and kick off the async RPC call */
	return rpc_run_task(&task_setup_data);
out_err:
	tk_ops->rpc_release(req);
	return ERR_PTR(-ENOLCK);
}

static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct rpc_task *task;

	task = __nlm_async_call(req, proc, msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

/*
 * NLM asynchronous call.
 */
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp = &req->a_args,
		.rpc_resp = &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp = &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

/*
 * NLM client asynchronous call.
 *
 * Note that although the calls are asynchronous, and are therefore
 * guaranteed to complete, we still always attempt to wait for
 * completion in order to be able to correctly track the lock
 * state.
 */
static int nlmclnt_async_call(const struct cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp = &req->a_args,
		.rpc_resp = &req->a_res,
		.rpc_cred = cred,
	};
	struct rpc_task *task;
	int err;

	task = __nlm_async_call(req, proc, &msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	err = rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return err;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
	int status;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
	if (status < 0)
		goto out;

	switch (req->a_res.status) {
	case nlm_granted:
		fl->fl_type = F_UNLCK;
		break;
	case nlm_lck_denied:
		/*
		 * Report the conflicting lock back to the application.
		 */
		fl->fl_start = req->a_res.lock.fl.fl_start;
		fl->fl_end = req->a_res.lock.fl.fl_end;
		fl->fl_type = req->a_res.lock.fl.fl_type;
		fl->fl_pid = -req->a_res.lock.fl.fl_pid;
		break;
	default:
		status = nlm_stat_to_errno(req->a_res.status);
	}
out:
	nlmclnt_release_call(req);
	return status;
}

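/*
 * fl_copy_lock callback: share the lockowner with the copied lock and
 * add the copy to the host's list of granted locks.
 */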
static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
	new->fl_u.nfs_fl.owner = nlmclnt_get_lockowner(fl->fl_u.nfs_fl.owner);
	list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
}

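/*
 * fl_release_private callback: unlink the lock from the host's granted
 * list and drop our reference to the lockowner.
 */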
static void nlmclnt_locks_release_private(struct file_lock *fl)
{
	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	list_del(&fl->fl_u.nfs_fl.list);
	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	nlmclnt_put_lockowner(fl->fl_u.nfs_fl.owner);
}

static const struct file_lock_operations nlmclnt_lock_ops = {
	.fl_copy_lock = nlmclnt_locks_copy_lock,
	.fl_release_private = nlmclnt_locks_release_private,
};

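/*
 * Attach NLM-specific state to a file_lock: look up (or create) the
 * lockowner for fl->fl_owner and install the client lock operations.
 */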
static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
	fl->fl_u.nfs_fl.state = 0;
	fl->fl_u.nfs_fl.owner = nlmclnt_find_lockowner(host, fl->fl_owner);
	INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
	fl->fl_ops = &nlmclnt_lock_ops;
}

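/*
 * Apply or release the lock locally through the VFS.
 */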
static int do_vfs_lock(struct file_lock *fl)
{
	return locks_lock_file_wait(fl->fl_file, fl);
}

/*
 * LOCK: Try to create a lock
 *
 *			Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A: Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
	const struct cred *cred = nfs_file_cred(fl->fl_file);
	struct nlm_host *host = req->a_host;
	struct nlm_res *resp = &req->a_res;
	struct nlm_wait *block = NULL;
	unsigned char fl_flags = fl->fl_flags;
	unsigned char fl_type;
	int status = -ENOLCK;

	if (nsm_monitor(host) < 0)
		goto out;
	req->a_args.state = nsm_local_state;

	fl->fl_flags |= FL_ACCESS;
	status = do_vfs_lock(fl);
	fl->fl_flags = fl_flags;
	if (status < 0)
		goto out;

	block = nlmclnt_prepare_block(host, fl);
again:
	/*
	 * Initialise resp->status to a valid non-zero value,
	 * since 0 == nlm_lck_granted
	 */
	resp->status = nlm_lck_blocked;
	for(;;) {
		/* Reboot protection */
		fl->fl_u.nfs_fl.state = host->h_state;
		status = nlmclnt_call(cred, req, NLMPROC_LOCK);
		if (status < 0)
			break;
		/* Did a reclaimer thread notify us of a server reboot? */
		if (resp->status == nlm_lck_denied_grace_period)
			continue;
		if (resp->status != nlm_lck_blocked)
			break;
		/* Wait on an NLM blocking lock */
		status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
		if (status < 0)
			break;
		if (resp->status != nlm_lck_blocked)
			break;
	}

	/* if we were interrupted while blocking, then cancel the lock request
	 * and exit
	 */
	if (resp->status == nlm_lck_blocked) {
		if (!req->a_args.block)
			goto out_unlock;
		if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
			goto out_unblock;
	}

	if (resp->status == nlm_granted) {
		down_read(&host->h_rwsem);
		/* Check whether or not the server has rebooted */
		if (fl->fl_u.nfs_fl.state != host->h_state) {
			up_read(&host->h_rwsem);
			goto again;
		}
		/* Ensure the resulting lock will get added to granted list */
		fl->fl_flags |= FL_SLEEP;
		if (do_vfs_lock(fl) < 0)
			printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
		up_read(&host->h_rwsem);
		fl->fl_flags = fl_flags;
		status = 0;
	}
	if (status < 0)
		goto out_unlock;
	/*
	 * EAGAIN doesn't make sense for sleeping locks, and in some
	 * cases NLM_LCK_DENIED is returned for a permanent error. So
	 * turn it into an ENOLCK.
	 */
	if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
		status = -ENOLCK;
	else
		status = nlm_stat_to_errno(resp->status);
out_unblock:
	nlmclnt_finish_block(block);
out:
	nlmclnt_release_call(req);
	return status;
out_unlock:
	/* Fatal error: ensure that we remove the lock altogether */
	dprintk("lockd: lock attempt ended in fatal error.\n"
		" Attempting to unlock.\n");
	nlmclnt_finish_block(block);
	fl_type = fl->fl_type;
	fl->fl_type = F_UNLCK;
	down_read(&host->h_rwsem);
	do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_type = fl_type;
	fl->fl_flags = fl_flags;
	nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	return status;
}

/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl,
		struct nlm_rqst *req)
{
	int status;

	memset(req, 0, sizeof(*req));
	locks_init_lock(&req->a_args.lock.fl);
	locks_init_lock(&req->a_res.lock.fl);
	req->a_host = host;

	/* Set up the argument struct */
	nlmclnt_setlockargs(req, fl);
	req->a_args.reclaim = 1;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
	if (status >= 0 && req->a_res.status == nlm_granted)
		return 0;

	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
			"(errno %d, status %d)\n", fl->fl_pid,
			status, ntohl(req->a_res.status));

	/*
	 * FIXME: This is a serious failure. We can
	 *
	 *  a. Ignore the problem
	 *  b. Send the owning process some signal (Linux doesn't have
	 *     SIGLOST, though...)
	 *  c. Retry the operation
	 *
	 * Until someone comes up with a simple implementation
	 * for b or c, I'll choose option a.
	 */

	return -ENOLCK;
}

/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_host *host = req->a_host;
	struct nlm_res *resp = &req->a_res;
	int status;
	unsigned char fl_flags = fl->fl_flags;

	/*
	 * Note: the server is supposed to either grant us the unlock
	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
	 * case, we want to unlock.
	 */
	fl->fl_flags |= FL_EXISTS;
	down_read(&host->h_rwsem);
	status = do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_flags = fl_flags;
	if (status == -ENOENT) {
		status = 0;
		goto out;
	}

	refcount_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	if (status < 0)
		goto out;

	if (resp->status == nlm_granted)
		goto out;

	if (resp->status != nlm_lck_denied_nolocks)
		printk("lockd: unexpected unlock status: %d\n",
			ntohl(resp->status));
	/* What to do now? I'm out of my depth... */
	status = -ENOLCK;
out:
	nlmclnt_release_call(req);
	return status;
}

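/*
 * rpc_call_prepare callback for UNLOCK: give the ->nlmclnt_unlock_prepare()
 * hook a chance to defer the call before it is started.
 */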
static void nlmclnt_unlock_prepare(struct rpc_task *task, void *data)
{
	struct nlm_rqst *req = data;
	const struct nlmclnt_operations *nlmclnt_ops = req->a_host->h_nlmclnt_ops;
	bool defer_call = false;

	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_unlock_prepare)
		defer_call = nlmclnt_ops->nlmclnt_unlock_prepare(task, req->a_callback_data);

	if (!defer_call)
		rpc_call_start(task);
}

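/*
 * rpc_call_done callback for UNLOCK: rebind and retry on transport errors,
 * and delay the retry while the server is in its grace period.
 */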
static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst *req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_SIGNALLED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
		switch (task->tk_status) {
		case -EACCES:
		case -EIO:
			goto die;
		default:
			goto retry_rebind;
		}
	}
	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
		rpc_delay(task, NLMCLNT_GRACE_WAIT);
		goto retry_unlock;
	}
	if (status != NLM_LCK_GRANTED)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
	return;
retry_rebind:
	nlm_rebind_host(req->a_host);
retry_unlock:
	rpc_restart_call(task);
}

static const struct rpc_call_ops nlmclnt_unlock_ops = {
	.rpc_call_prepare = nlmclnt_unlock_prepare,
	.rpc_call_done = nlmclnt_unlock_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{
	struct nlm_rqst *req;
	int status;

	dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
		" Attempting to cancel lock.\n");

	req = nlm_alloc_call(host);
	if (!req)
		return -ENOMEM;
	req->a_flags = RPC_TASK_ASYNC;

	nlmclnt_setlockargs(req, fl);
	req->a_args.block = block;

	refcount_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_CANCEL, &nlmclnt_cancel_ops);
	if (status == 0 && req->a_res.status == nlm_lck_denied)
		status = -ENOLCK;
	nlmclnt_release_call(req);
	return status;
}

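/*
 * rpc_call_done callback for CANCEL: retry (up to NLMCLNT_MAX_RETRIES)
 * on RPC errors or when the server claims to hold no locks.
 */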
static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst *req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_SIGNALLED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: CANCEL call error %d, retrying.\n",
			task->tk_status);
		goto retry_cancel;
	}

	switch (status) {
	case NLM_LCK_GRANTED:
	case NLM_LCK_DENIED_GRACE_PERIOD:
	case NLM_LCK_DENIED:
		/* Everything's good */
		break;
	case NLM_LCK_DENIED_NOLOCKS:
		dprintk("lockd: CANCEL failed (server has no locks)\n");
		goto retry_cancel;
	default:
		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
			status);
	}

die:
	return;

retry_cancel:
	/* Don't ever retry more than 3 times */
	if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
		goto die;
	nlm_rebind_host(req->a_host);
	rpc_restart_call(task);
	rpc_delay(task, 30 * HZ);
}

static const struct rpc_call_ops nlmclnt_cancel_ops = {
	.rpc_call_done = nlmclnt_cancel_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(__be32 status)
{
	switch(ntohl(status)) {
	case NLM_LCK_GRANTED:
		return 0;
	case NLM_LCK_DENIED:
		return -EAGAIN;
	case NLM_LCK_DENIED_NOLOCKS:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		return -ENOLCK;
	case NLM_LCK_BLOCKED:
		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
		return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
	case NLM_DEADLCK:
		return -EDEADLK;
	case NLM_ROFS:
		return -EROFS;
	case NLM_STALE_FH:
		return -ESTALE;
	case NLM_FBIG:
		return -EOVERFLOW;
	case NLM_FAILED:
		return -ENOLCK;
#endif
	}
	printk(KERN_NOTICE "lockd: unexpected server status %d\n",
		ntohl(status));
	return -ENOLCK;
}