1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* GSSAPI-based RxRPC security
3 *
4 * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
6 */
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10 #include <linux/net.h>
11 #include <linux/skbuff.h>
12 #include <linux/slab.h>
13 #include <linux/key-type.h>
14 #include "ar-internal.h"
15 #include "rxgk_common.h"
16
17 /*
18 * Parse the information from a server key
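 *
 * The key description takes the form "<service>:<sec_class>:<kvno>:<enctype>",
 * four unsigned decimal fields (e.g. "2500:2:1:17" is purely illustrative),
 * and the payload is the raw key material, whose length must match the key
 * length of the selected enctype.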
19 */
20 static int rxgk_preparse_server_key(struct key_preparsed_payload *prep)
21 {
22 const struct krb5_enctype *krb5;
23 struct krb5_buffer *server_key = (void *)&prep->payload.data[2];
24 unsigned int service, sec_class, kvno, enctype;
25 int n = 0;
26
27 _enter("%zu", prep->datalen);
28
29 if (sscanf(prep->orig_description, "%u:%u:%u:%u%n",
30 &service, &sec_class, &kvno, &enctype, &n) != 4)
31 return -EINVAL;
32
33 if (prep->orig_description[n])
34 return -EINVAL;
35
36 krb5 = crypto_krb5_find_enctype(enctype);
37 if (!krb5)
38 return -ENOPKG;
39
40 prep->payload.data[0] = (struct krb5_enctype *)krb5;
41
42 if (prep->datalen != krb5->key_len)
43 return -EKEYREJECTED;
44
45 server_key->len = prep->datalen;
46 server_key->data = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
47 if (!server_key->data)
48 return -ENOMEM;
49
50 _leave(" = 0");
51 return 0;
52 }
53
54 static void rxgk_free_server_key(union key_payload *payload)
55 {
56 struct krb5_buffer *server_key = (void *)&payload->data[2];
57
58 kfree_sensitive(server_key->data);
59 }
60
61 static void rxgk_free_preparse_server_key(struct key_preparsed_payload *prep)
62 {
63 rxgk_free_server_key(&prep->payload);
64 }
65
66 static void rxgk_destroy_server_key(struct key *key)
67 {
68 rxgk_free_server_key(&key->payload);
69 }
70
71 static void rxgk_describe_server_key(const struct key *key, struct seq_file *m)
72 {
73 const struct krb5_enctype *krb5 = key->payload.data[0];
74
75 if (krb5)
76 seq_printf(m, ": %s", krb5->name);
77 }
78
79 /*
80 * Handle rekeying the connection when we see our limits overrun or when the
81 * far side decided to rekey.
82 *
83 * Returns a ref on the context if successful or -ESTALE if the key is out of
84 * date.
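 *
 * A small window of transport-key generations is kept in conn->rxgk.keys[],
 * indexed by the key number masked to the window size.  Only the current and
 * previous generations are accepted from the peer; cranking the window
 * forward releases the generation that falls out of that range.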
85 */
86 static struct rxgk_context *rxgk_rekey(struct rxrpc_connection *conn,
87 const u16 *specific_key_number)
88 {
89 struct rxgk_context *gk, *dead = NULL;
90 unsigned int key_number, current_key, mask = ARRAY_SIZE(conn->rxgk.keys) - 1;
91 bool crank = false;
92
93 _enter("%d", specific_key_number ? *specific_key_number : -1);
94
95 mutex_lock(&conn->security_lock);
96
97 current_key = conn->rxgk.key_number;
98 if (!specific_key_number) {
99 key_number = current_key;
100 } else {
101 if (*specific_key_number == (u16)current_key)
102 key_number = current_key;
103 else if (*specific_key_number == (u16)(current_key - 1))
104 key_number = current_key - 1;
105 else if (*specific_key_number == (u16)(current_key + 1))
106 goto crank_window;
107 else
108 goto bad_key;
109 }
110
111 gk = conn->rxgk.keys[key_number & mask];
112 if (!gk)
113 goto generate_key;
114 if (!specific_key_number &&
115 test_bit(RXGK_TK_NEEDS_REKEY, &gk->flags))
116 goto crank_window;
117
118 grab:
119 refcount_inc(&gk->usage);
120 mutex_unlock(&conn->security_lock);
121 rxgk_put(dead);
122 return gk;
123
124 crank_window:
125 trace_rxrpc_rxgk_rekey(conn, current_key,
126 specific_key_number ? *specific_key_number : -1);
127 if (current_key == UINT_MAX)
128 goto bad_key;
129 if (current_key + 1 == UINT_MAX)
130 set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
131
132 key_number = current_key + 1;
133 if (WARN_ON(conn->rxgk.keys[key_number & mask]))
134 goto bad_key;
135 crank = true;
136
137 generate_key:
138 gk = conn->rxgk.keys[current_key & mask];
139 gk = rxgk_generate_transport_key(conn, gk->key, key_number, GFP_NOFS);
140 if (IS_ERR(gk)) {
141 mutex_unlock(&conn->security_lock);
142 return gk;
143 }
144
145 write_lock(&conn->security_use_lock);
146 if (crank) {
147 current_key++;
148 conn->rxgk.key_number = current_key;
149 dead = conn->rxgk.keys[(current_key - 2) & mask];
150 conn->rxgk.keys[(current_key - 2) & mask] = NULL;
151 }
152 conn->rxgk.keys[current_key & mask] = gk;
153 write_unlock(&conn->security_use_lock);
154 goto grab;
155
156 bad_key:
157 mutex_unlock(&conn->security_lock);
158 return ERR_PTR(-ESTALE);
159 }
160
161 /*
162 * Get the specified keying context.
163 *
164 * Returns a ref on the context if successful or -ESTALE if the key is out of
165 * date.
166 */
167 static struct rxgk_context *rxgk_get_key(struct rxrpc_connection *conn,
168 const u16 *specific_key_number)
169 {
170 struct rxgk_context *gk;
171 unsigned int key_number, current_key, mask = ARRAY_SIZE(conn->rxgk.keys) - 1;
172
173 _enter("{%u},%d",
174 conn->rxgk.key_number, specific_key_number ? *specific_key_number : -1);
175
176 read_lock(&conn->security_use_lock);
177
178 current_key = conn->rxgk.key_number;
179 if (!specific_key_number) {
180 key_number = current_key;
181 } else {
182 /* Only the bottom 16 bits of the key number are exposed in the
183 * header, so we try and keep the upper 16 bits in step. The
184 * whole 32 bits are used to generate the TK.
185 */
186 if (*specific_key_number == (u16)current_key)
187 key_number = current_key;
188 else if (*specific_key_number == (u16)(current_key - 1))
189 key_number = current_key - 1;
190 else if (*specific_key_number == (u16)(current_key + 1))
191 goto rekey;
192 else
193 goto bad_key;
194 }
195
196 gk = conn->rxgk.keys[key_number & mask];
197 if (!gk)
198 goto slow_path;
199 if (!specific_key_number &&
200 key_number < UINT_MAX) {
201 if (time_after(jiffies, gk->expiry) ||
202 gk->bytes_remaining < 0) {
203 set_bit(RXGK_TK_NEEDS_REKEY, &gk->flags);
204 goto slow_path;
205 }
206
207 if (test_bit(RXGK_TK_NEEDS_REKEY, &gk->flags))
208 goto slow_path;
209 }
210
211 refcount_inc(&gk->usage);
212 read_unlock(&conn->security_use_lock);
213 return gk;
214
215 rekey:
216 _debug("rekey");
217 if (current_key == UINT_MAX)
218 goto bad_key;
219 gk = conn->rxgk.keys[current_key & mask];
220 if (gk)
221 set_bit(RXGK_TK_NEEDS_REKEY, &gk->flags);
222 slow_path:
223 read_unlock(&conn->security_use_lock);
224 return rxgk_rekey(conn, specific_key_number);
225 bad_key:
226 read_unlock(&conn->security_use_lock);
227 return ERR_PTR(-ESTALE);
228 }
229
230 /*
231 * initialise connection security
232 */
233 static int rxgk_init_connection_security(struct rxrpc_connection *conn,
234 struct rxrpc_key_token *token)
235 {
236 struct rxgk_context *gk;
237 int ret;
238
239 _enter("{%d,%u},{%x}",
240 conn->debug_id, conn->rxgk.key_number, key_serial(conn->key));
241
242 conn->security_ix = token->security_index;
243 conn->security_level = token->rxgk->level;
244
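	/* Note: the client's connection start time is kept in units of 100ns
	 * (ktime_get() returns ns); it is echoed back in the RESPONSE packet
	 * header by rxgk_insert_response_header().
	 */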
245 if (rxrpc_conn_is_client(conn)) {
246 conn->rxgk.start_time = ktime_get();
247 do_div(conn->rxgk.start_time, 100);
248 }
249
250 gk = rxgk_generate_transport_key(conn, token->rxgk, conn->rxgk.key_number,
251 GFP_NOFS);
252 if (IS_ERR(gk))
253 return PTR_ERR(gk);
254 conn->rxgk.enctype = gk->krb5->etype;
255 conn->rxgk.keys[gk->key_number & 3] = gk;
256
257 switch (conn->security_level) {
258 case RXRPC_SECURITY_PLAIN:
259 case RXRPC_SECURITY_AUTH:
260 case RXRPC_SECURITY_ENCRYPT:
261 break;
262 default:
263 ret = -EKEYREJECTED;
264 goto error;
265 }
266
267 ret = 0;
268 error:
269 _leave(" = %d", ret);
270 return ret;
271 }
272
273 /*
274 * Clean up the crypto on a call.
275 */
276 static void rxgk_free_call_crypto(struct rxrpc_call *call)
277 {
278 }
279
280 /*
281 * Work out how much data we can put in a packet.
282 */
283 static struct rxrpc_txbuf *rxgk_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp)
284 {
285 enum krb5_crypto_mode mode;
286 struct rxgk_context *gk;
287 struct rxrpc_txbuf *txb;
288 size_t shdr, alloc, limit, part, offset, gap;
289
290 switch (call->conn->security_level) {
291 default:
292 alloc = umin(remain, RXRPC_JUMBO_DATALEN);
293 return rxrpc_alloc_data_txbuf(call, alloc, 1, gfp);
294 case RXRPC_SECURITY_AUTH:
295 shdr = 0;
296 mode = KRB5_CHECKSUM_MODE;
297 break;
298 case RXRPC_SECURITY_ENCRYPT:
299 shdr = sizeof(struct rxgk_header);
300 mode = KRB5_ENCRYPT_MODE;
301 break;
302 }
303
304 gk = rxgk_get_key(call->conn, NULL);
305 if (IS_ERR(gk))
306 return NULL;
307
308 /* Work out the maximum amount of data that will fit. */
309 alloc = RXRPC_JUMBO_DATALEN;
310 limit = crypto_krb5_how_much_data(gk->krb5, mode, &alloc, &offset);
311
312 if (remain < limit - shdr) {
313 part = remain;
314 alloc = crypto_krb5_how_much_buffer(gk->krb5, mode,
315 shdr + part, &offset);
316 gap = 0;
317 } else {
318 part = limit - shdr;
319 gap = RXRPC_JUMBO_DATALEN - alloc;
320 alloc = RXRPC_JUMBO_DATALEN;
321 }
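	/* "gap" is the tail of the jumbo-sized buffer left over once the
	 * kerberos wrapping has been accounted for; it's the excess space that
	 * gets cleared below.
	 */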
322
323 rxgk_put(gk);
324
325 txb = rxrpc_alloc_data_txbuf(call, alloc, 16, gfp);
326 if (!txb)
327 return NULL;
328
329 txb->crypto_header = offset;
330 txb->sec_header = shdr;
331 txb->offset += offset + shdr;
332 txb->space = part;
333
334 /* Clear excess space in the packet */
335 if (gap)
336 memset(txb->data + alloc - gap, 0, gap);
337 return txb;
338 }
339
340 /*
341 * Integrity mode (sign a packet - level 1 security)
342 */
343 static int rxgk_secure_packet_integrity(const struct rxrpc_call *call,
344 struct rxgk_context *gk,
345 struct rxrpc_txbuf *txb)
346 {
347 struct rxgk_header *hdr;
348 struct scatterlist sg[1];
349 struct krb5_buffer metadata;
350 int ret = -ENOMEM;
351
352 _enter("");
353
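	/* Build the security pseudo-header.  In integrity mode it isn't
	 * transmitted; it is only supplied to the MIC computation as
	 * associated metadata.
	 */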
354 hdr = kzalloc(sizeof(*hdr), GFP_NOFS);
355 if (!hdr)
356 goto error_gk;
357
358 hdr->epoch = htonl(call->conn->proto.epoch);
359 hdr->cid = htonl(call->cid);
360 hdr->call_number = htonl(call->call_id);
361 hdr->seq = htonl(txb->seq);
362 hdr->sec_index = htonl(call->security_ix);
363 hdr->data_len = htonl(txb->len);
364 metadata.len = sizeof(*hdr);
365 metadata.data = hdr;
366
367 sg_init_table(sg, 1);
368 sg_set_buf(&sg[0], txb->data, txb->alloc_size);
369
370 ret = crypto_krb5_get_mic(gk->krb5, gk->tx_Kc, &metadata,
371 sg, 1, txb->alloc_size,
372 txb->crypto_header, txb->sec_header + txb->len);
373 if (ret >= 0) {
374 txb->pkt_len = ret;
375 if (txb->alloc_size == RXRPC_JUMBO_DATALEN)
376 txb->jumboable = true;
377 gk->bytes_remaining -= ret;
378 }
379 kfree(hdr);
380 error_gk:
381 rxgk_put(gk);
382 _leave(" = %d", ret);
383 return ret;
384 }
385
386 /*
387 * wholly encrypt a packet (level 2 security)
388 */
389 static int rxgk_secure_packet_encrypted(const struct rxrpc_call *call,
390 struct rxgk_context *gk,
391 struct rxrpc_txbuf *txb)
392 {
393 struct rxgk_header *hdr;
394 struct scatterlist sg[1];
395 int ret;
396
397 _enter("%x", txb->len);
398
399 /* Insert the header into the buffer. */
400 hdr = txb->data + txb->crypto_header;
401 hdr->epoch = htonl(call->conn->proto.epoch);
402 hdr->cid = htonl(call->cid);
403 hdr->call_number = htonl(call->call_id);
404 hdr->seq = htonl(txb->seq);
405 hdr->sec_index = htonl(call->security_ix);
406 hdr->data_len = htonl(txb->len);
407
408 sg_init_table(sg, 1);
409 sg_set_buf(&sg[0], txb->data, txb->alloc_size);
410
411 ret = crypto_krb5_encrypt(gk->krb5, gk->tx_enc,
412 sg, 1, txb->alloc_size,
413 txb->crypto_header, txb->sec_header + txb->len,
414 false);
415 if (ret >= 0) {
416 txb->pkt_len = ret;
417 if (txb->alloc_size == RXRPC_JUMBO_DATALEN)
418 txb->jumboable = true;
419 gk->bytes_remaining -= ret;
420 }
421
422 rxgk_put(gk);
423 _leave(" = %d", ret);
424 return ret;
425 }
426
427 /*
428 * Apply the security to a packet to be transmitted: checksum/MIC it or encrypt it as the security level dictates.
429 */
430 static int rxgk_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
431 {
432 struct rxgk_context *gk;
433 int ret;
434
435 _enter("{%d{%x}},{#%u},%u,",
436 call->debug_id, key_serial(call->conn->key), txb->seq, txb->len);
437
438 gk = rxgk_get_key(call->conn, NULL);
439 if (IS_ERR(gk))
440 return PTR_ERR(gk) == -ESTALE ? -EKEYREJECTED : PTR_ERR(gk);
441
442 ret = key_validate(call->conn->key);
443 if (ret < 0) {
444 rxgk_put(gk);
445 return ret;
446 }
447
448 call->security_enctype = gk->krb5->etype;
449 txb->cksum = htons(gk->key_number);
450
451 switch (call->conn->security_level) {
452 case RXRPC_SECURITY_PLAIN:
453 rxgk_put(gk);
454 txb->pkt_len = txb->len;
455 return 0;
456 case RXRPC_SECURITY_AUTH:
457 return rxgk_secure_packet_integrity(call, gk, txb);
458 case RXRPC_SECURITY_ENCRYPT:
459 return rxgk_secure_packet_encrypted(call, gk, txb);
460 default:
461 rxgk_put(gk);
462 return -EPERM;
463 }
464 }
465
466 /*
467 * Integrity mode (check the signature on a packet - level 1 security)
468 */
469 static int rxgk_verify_packet_integrity(struct rxrpc_call *call,
470 struct rxgk_context *gk,
471 struct sk_buff *skb)
472 {
473 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
474 struct rxgk_header *hdr;
475 struct krb5_buffer metadata;
476 unsigned int offset = sp->offset, len = sp->len;
477 size_t data_offset = 0, data_len = len;
478 u32 ac;
479 int ret = -ENOMEM;
480
481 _enter("");
482
483 crypto_krb5_where_is_the_data(gk->krb5, KRB5_CHECKSUM_MODE,
484 &data_offset, &data_len);
485
486 hdr = kzalloc(sizeof(*hdr), GFP_NOFS);
487 if (!hdr)
488 goto put_gk;
489
490 hdr->epoch = htonl(call->conn->proto.epoch);
491 hdr->cid = htonl(call->cid);
492 hdr->call_number = htonl(call->call_id);
493 hdr->seq = htonl(sp->hdr.seq);
494 hdr->sec_index = htonl(call->security_ix);
495 hdr->data_len = htonl(data_len);
496
497 metadata.len = sizeof(*hdr);
498 metadata.data = hdr;
499 ret = rxgk_verify_mic_skb(gk->krb5, gk->rx_Kc, &metadata,
500 skb, &offset, &len, &ac);
501 kfree(hdr);
502 if (ret == -EPROTO) {
503 rxrpc_abort_eproto(call, skb, ac,
504 rxgk_abort_1_verify_mic_eproto);
505 } else {
506 sp->offset = offset;
507 sp->len = len;
508 }
509
510 put_gk:
511 rxgk_put(gk);
512 _leave(" = %d", ret);
513 return ret;
514 }
515
516 /*
517 * Decrypt an encrypted packet (level 2 security).
518 */
519 static int rxgk_verify_packet_encrypted(struct rxrpc_call *call,
520 struct rxgk_context *gk,
521 struct sk_buff *skb)
522 {
523 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
524 struct rxgk_header hdr;
525 unsigned int offset = sp->offset, len = sp->len;
526 int ret;
527 u32 ac;
528
529 _enter("");
530
531 ret = rxgk_decrypt_skb(gk->krb5, gk->rx_enc, skb, &offset, &len, &ac);
532 if (ret == -EPROTO)
533 rxrpc_abort_eproto(call, skb, ac, rxgk_abort_2_decrypt_eproto);
534 if (ret < 0)
535 goto error;
536
537 if (len < sizeof(hdr)) {
538 ret = rxrpc_abort_eproto(call, skb, RXGK_PACKETSHORT,
539 rxgk_abort_2_short_header);
540 goto error;
541 }
542
543 /* Extract the header from the skb */
544 ret = skb_copy_bits(skb, offset, &hdr, sizeof(hdr));
545 if (ret < 0) {
546 ret = rxrpc_abort_eproto(call, skb, RXGK_PACKETSHORT,
547 rxgk_abort_2_short_encdata);
548 goto error;
549 }
550 offset += sizeof(hdr);
551 len -= sizeof(hdr);
552
553 if (ntohl(hdr.epoch) != call->conn->proto.epoch ||
554 ntohl(hdr.cid) != call->cid ||
555 ntohl(hdr.call_number) != call->call_id ||
556 ntohl(hdr.seq) != sp->hdr.seq ||
557 ntohl(hdr.sec_index) != call->security_ix ||
558 ntohl(hdr.data_len) > len) {
559 ret = rxrpc_abort_eproto(call, skb, RXGK_SEALEDINCON,
560 rxgk_abort_2_short_data);
561 goto error;
562 }
563
564 sp->offset = offset;
565 sp->len = ntohl(hdr.data_len);
566 ret = 0;
567 error:
568 rxgk_put(gk);
569 _leave(" = %d", ret);
570 return ret;
571 }
572
573 /*
574 * Verify the security on a received packet or subpacket (if part of a
575 * jumbo packet).
576 */
577 static int rxgk_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
578 {
579 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
580 struct rxgk_context *gk;
581 u16 key_number = sp->hdr.cksum;
582
583 _enter("{%d{%x}},{#%u}",
584 call->debug_id, key_serial(call->conn->key), sp->hdr.seq);
585
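	/* The bottom 16 bits of the transport key number are conveyed in the
	 * cksum field of the packet header (see rxgk_secure_packet()).
	 */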
586 gk = rxgk_get_key(call->conn, &key_number);
587 if (IS_ERR(gk)) {
588 switch (PTR_ERR(gk)) {
589 case -ESTALE:
590 return rxrpc_abort_eproto(call, skb, RXGK_BADKEYNO,
591 rxgk_abort_bad_key_number);
592 default:
593 return PTR_ERR(gk);
594 }
595 }
596
597 call->security_enctype = gk->krb5->etype;
598 switch (call->conn->security_level) {
599 case RXRPC_SECURITY_PLAIN:
600 rxgk_put(gk);
601 return 0;
602 case RXRPC_SECURITY_AUTH:
603 return rxgk_verify_packet_integrity(call, gk, skb);
604 case RXRPC_SECURITY_ENCRYPT:
605 return rxgk_verify_packet_encrypted(call, gk, skb);
606 default:
607 rxgk_put(gk);
608 return -ENOANO;
609 }
610 }
611
612 /*
613 * Allocate memory to hold a challenge or a response packet. We're not running
614 * in the io_thread, so we can't use ->tx_alloc.
615 */
616 static struct page *rxgk_alloc_packet(size_t total_len)
617 {
618 gfp_t gfp = GFP_NOFS;
619 int order;
620
621 order = get_order(total_len);
622 if (order > 0)
623 gfp |= __GFP_COMP;
624 return alloc_pages(gfp, order);
625 }
626
627 /*
628 * Issue a challenge.
629 */
630 static int rxgk_issue_challenge(struct rxrpc_connection *conn)
631 {
632 struct rxrpc_wire_header *whdr;
633 struct bio_vec bvec[1];
634 struct msghdr msg;
635 struct page *page;
636 size_t len = sizeof(*whdr) + sizeof(conn->rxgk.nonce);
637 u32 serial;
638 int ret;
639
640 _enter("{%d}", conn->debug_id);
641
642 get_random_bytes(&conn->rxgk.nonce, sizeof(conn->rxgk.nonce));
643
644 /* We can't use conn->tx_alloc without a lock */
645 page = rxgk_alloc_packet(sizeof(*whdr) + sizeof(conn->rxgk.nonce));
646 if (!page)
647 return -ENOMEM;
648
649 bvec_set_page(&bvec[0], page, len, 0);
650 iov_iter_bvec(&msg.msg_iter, WRITE, bvec, 1, len);
651
652 msg.msg_name = &conn->peer->srx.transport;
653 msg.msg_namelen = conn->peer->srx.transport_len;
654 msg.msg_control = NULL;
655 msg.msg_controllen = 0;
656 msg.msg_flags = MSG_SPLICE_PAGES;
657
658 whdr = page_address(page);
659 whdr->epoch = htonl(conn->proto.epoch);
660 whdr->cid = htonl(conn->proto.cid);
661 whdr->callNumber = 0;
662 whdr->seq = 0;
663 whdr->type = RXRPC_PACKET_TYPE_CHALLENGE;
664 whdr->flags = conn->out_clientflag;
665 whdr->userStatus = 0;
666 whdr->securityIndex = conn->security_ix;
667 whdr->_rsvd = 0;
668 whdr->serviceId = htons(conn->service_id);
669
670 memcpy(whdr + 1, conn->rxgk.nonce, sizeof(conn->rxgk.nonce));
671
672 serial = rxrpc_get_next_serials(conn, 1);
673 whdr->serial = htonl(serial);
674
675 trace_rxrpc_tx_challenge(conn, serial, 0, *(u32 *)&conn->rxgk.nonce);
676
677 ret = do_udp_sendmsg(conn->local->socket, &msg, len);
678 if (ret > 0)
679 conn->peer->last_tx_at = ktime_get_seconds();
680 __free_page(page);
681
682 if (ret < 0) {
683 trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
684 rxrpc_tx_point_rxgk_challenge);
685 return -EAGAIN;
686 }
687
688 trace_rxrpc_tx_packet(conn->debug_id, whdr,
689 rxrpc_tx_point_rxgk_challenge);
690 _leave(" = 0");
691 return 0;
692 }
693
694 /*
695 * Validate a challenge packet.
696 */
697 static bool rxgk_validate_challenge(struct rxrpc_connection *conn,
698 struct sk_buff *skb)
699 {
700 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
701 u8 nonce[20];
702
703 if (!conn->key) {
704 rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
705 rxgk_abort_chall_no_key);
706 return false;
707 }
708
709 if (key_validate(conn->key) < 0) {
710 rxrpc_abort_conn(conn, skb, RXGK_EXPIRED, -EPROTO,
711 rxgk_abort_chall_key_expired);
712 return false;
713 }
714
715 if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
716 nonce, sizeof(nonce)) < 0) {
717 rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO,
718 rxgk_abort_chall_short);
719 return false;
720 }
721
722 trace_rxrpc_rx_challenge(conn, sp->hdr.serial, 0, *(u32 *)nonce, 0);
723 return true;
724 }
725
726 /**
727 * rxgk_kernel_query_challenge - Query RxGK-specific challenge parameters
728 * @challenge: The challenge packet to query
729 *
730 * Return: The Kerberos 5 encryption type (enctype) of the challenged connection.
731 */
732 u32 rxgk_kernel_query_challenge(struct sk_buff *challenge)
733 {
734 struct rxrpc_skb_priv *sp = rxrpc_skb(challenge);
735
736 return sp->chall.conn->rxgk.enctype;
737 }
738 EXPORT_SYMBOL(rxgk_kernel_query_challenge);
739
740 /*
741 * Fill out the control message to pass to userspace to inform about the
742 * challenge.
743 */
744 static int rxgk_challenge_to_recvmsg(struct rxrpc_connection *conn,
745 struct sk_buff *challenge,
746 struct msghdr *msg)
747 {
748 struct rxgk_challenge chall;
749
750 chall.base.service_id = conn->service_id;
751 chall.base.security_index = conn->security_ix;
752 chall.enctype = conn->rxgk.enctype;
753
754 return put_cmsg(msg, SOL_RXRPC, RXRPC_CHALLENGED, sizeof(chall), &chall);
755 }
756
757 /*
758 * Insert the requisite amount of XDR padding for the length given.
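 * (XDR encodes opaque data in multiples of four bytes, so, for example, a
 * 9-byte object is followed by three bytes of zero padding.)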
759 */
760 static int rxgk_pad_out(struct sk_buff *response, size_t len, size_t offset)
761 {
762 __be32 zero = 0;
763 size_t pad = xdr_round_up(len) - len;
764 int ret;
765
766 if (!pad)
767 return 0;
768
769 ret = skb_store_bits(response, offset, &zero, pad);
770 if (ret < 0)
771 return ret;
772 return pad;
773 }
774
775 /*
776 * Insert the header into the response.
777 */
778 static noinline ssize_t rxgk_insert_response_header(struct rxrpc_connection *conn,
779 struct rxgk_context *gk,
780 struct sk_buff *response,
781 size_t offset)
782 {
783 struct rxrpc_skb_priv *rsp = rxrpc_skb(response);
784
785 struct {
786 struct rxrpc_wire_header whdr;
787 __be32 start_time_msw;
788 __be32 start_time_lsw;
789 __be32 ticket_len;
790 } h;
791 int ret;
792
793 rsp->resp.kvno = gk->key_number;
794 rsp->resp.version = gk->krb5->etype;
795
796 h.whdr.epoch = htonl(conn->proto.epoch);
797 h.whdr.cid = htonl(conn->proto.cid);
798 h.whdr.callNumber = 0;
799 h.whdr.serial = 0;
800 h.whdr.seq = 0;
801 h.whdr.type = RXRPC_PACKET_TYPE_RESPONSE;
802 h.whdr.flags = conn->out_clientflag;
803 h.whdr.userStatus = 0;
804 h.whdr.securityIndex = conn->security_ix;
805 h.whdr.cksum = htons(gk->key_number);
806 h.whdr.serviceId = htons(conn->service_id);
807 h.start_time_msw = htonl(upper_32_bits(conn->rxgk.start_time));
808 h.start_time_lsw = htonl(lower_32_bits(conn->rxgk.start_time));
809 h.ticket_len = htonl(gk->key->ticket.len);
810
811 ret = skb_store_bits(response, offset, &h, sizeof(h));
812 return ret < 0 ? ret : sizeof(h);
813 }
814
815 /*
816 * Construct the authenticator to go in the response packet
817 *
818 * struct RXGK_Authenticator {
819 * opaque nonce[20];
820 * opaque appdata<>;
821 * RXGK_Level level;
822 * unsigned int epoch;
823 * unsigned int cid;
824 * unsigned int call_numbers<>;
825 * };
826 */
827 static ssize_t rxgk_construct_authenticator(struct rxrpc_connection *conn,
828 struct sk_buff *challenge,
829 const struct krb5_buffer *appdata,
830 struct sk_buff *response,
831 size_t offset)
832 {
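	/* The authenticator is assembled as two fixed-size pieces with the
	 * variable-length, XDR-padded appdata sandwiched between them.
	 */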
833 struct {
834 u8 nonce[20];
835 __be32 appdata_len;
836 } a;
837 struct {
838 __be32 level;
839 __be32 epoch;
840 __be32 cid;
841 __be32 call_numbers_count;
842 __be32 call_numbers[4];
843 } b;
844 int ret;
845
846 ret = skb_copy_bits(challenge, sizeof(struct rxrpc_wire_header),
847 a.nonce, sizeof(a.nonce));
848 if (ret < 0)
849 return -EPROTO;
850
851 a.appdata_len = htonl(appdata->len);
852
853 ret = skb_store_bits(response, offset, &a, sizeof(a));
854 if (ret < 0)
855 return ret;
856 offset += sizeof(a);
857
858 if (appdata->len) {
859 ret = skb_store_bits(response, offset, appdata->data, appdata->len);
860 if (ret < 0)
861 return ret;
862 offset += appdata->len;
863
864 ret = rxgk_pad_out(response, appdata->len, offset);
865 if (ret < 0)
866 return ret;
867 offset += ret;
868 }
869
870 b.level = htonl(conn->security_level);
871 b.epoch = htonl(conn->proto.epoch);
872 b.cid = htonl(conn->proto.cid);
873 b.call_numbers_count = htonl(4);
874 b.call_numbers[0] = htonl(conn->channels[0].call_counter);
875 b.call_numbers[1] = htonl(conn->channels[1].call_counter);
876 b.call_numbers[2] = htonl(conn->channels[2].call_counter);
877 b.call_numbers[3] = htonl(conn->channels[3].call_counter);
878
879 ret = skb_store_bits(response, offset, &b, sizeof(b));
880 if (ret < 0)
881 return ret;
882 return sizeof(a) + xdr_round_up(appdata->len) + sizeof(b);
883 }
884
885 static ssize_t rxgk_encrypt_authenticator(struct rxrpc_connection *conn,
886 struct rxgk_context *gk,
887 struct sk_buff *response,
888 size_t offset,
889 size_t alloc_len,
890 size_t auth_offset,
891 size_t auth_len)
892 {
893 struct scatterlist sg[16];
894 int nr_sg;
895
896 sg_init_table(sg, ARRAY_SIZE(sg));
897 nr_sg = skb_to_sgvec(response, sg, offset, alloc_len);
898 if (unlikely(nr_sg < 0))
899 return nr_sg;
900 return crypto_krb5_encrypt(gk->krb5, gk->resp_enc, sg, nr_sg, alloc_len,
901 auth_offset, auth_len, false);
902 }
903
904 /*
905 * Construct the response.
906 *
907 * struct RXGK_Response {
908 * rxgkTime start_time;
909 * RXGK_Data token;
910 * opaque authenticator<RXGK_MAXAUTHENTICATOR>
911 * };
912 */
913 static int rxgk_construct_response(struct rxrpc_connection *conn,
914 struct sk_buff *challenge,
915 struct krb5_buffer *appdata)
916 {
917 struct rxrpc_skb_priv *csp, *rsp;
918 struct rxgk_context *gk;
919 struct sk_buff *response;
920 size_t len, auth_len, authx_len, offset, auth_offset, authx_offset;
921 __be32 tmp;
922 int ret;
923
924 gk = rxgk_get_key(conn, NULL);
925 if (IS_ERR(gk))
926 return PTR_ERR(gk);
927
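	/* Authenticator size: 20-byte nonce + appdata length word + appdata +
	 * level, epoch and cid words + call-number count word + 4 call
	 * numbers.
	 */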
928 auth_len = 20 + (4 + appdata->len) + 12 + (1 + 4) * 4;
929 authx_len = crypto_krb5_how_much_buffer(gk->krb5, KRB5_ENCRYPT_MODE,
930 auth_len, &auth_offset);
931 len = sizeof(struct rxrpc_wire_header) +
932 8 + (4 + xdr_round_up(gk->key->ticket.len)) + (4 + authx_len);
933
934 response = alloc_skb_with_frags(0, len, 0, &ret, GFP_NOFS);
935 if (!response)
936 goto error;
937 rxrpc_new_skb(response, rxrpc_skb_new_response_rxgk);
938 response->len = len;
939 response->data_len = len;
940
941 ret = rxgk_insert_response_header(conn, gk, response, 0);
942 if (ret < 0)
943 goto error;
944 offset = ret;
945
946 ret = skb_store_bits(response, offset, gk->key->ticket.data, gk->key->ticket.len);
947 if (ret < 0)
948 goto error;
949 offset += gk->key->ticket.len;
950 ret = rxgk_pad_out(response, gk->key->ticket.len, offset);
951 if (ret < 0)
952 goto error;
953
954 authx_offset = offset + ret + 4; /* Leave a gap for the length. */
955
956 ret = rxgk_construct_authenticator(conn, challenge, appdata, response,
957 authx_offset + auth_offset);
958 if (ret < 0)
959 goto error;
960 auth_len = ret;
961
962 ret = rxgk_encrypt_authenticator(conn, gk, response,
963 authx_offset, authx_len,
964 auth_offset, auth_len);
965 if (ret < 0)
966 goto error;
967 authx_len = ret;
968
969 tmp = htonl(authx_len);
970 ret = skb_store_bits(response, authx_offset - 4, &tmp, 4);
971 if (ret < 0)
972 goto error;
973
974 ret = rxgk_pad_out(response, authx_len, authx_offset + authx_len);
975 if (ret < 0)
976 goto error;
977 len = authx_offset + authx_len + ret;
978
979 if (len != response->len) {
980 response->len = len;
981 response->data_len = len;
982 }
983
984 csp = rxrpc_skb(challenge);
985 rsp = rxrpc_skb(response);
986 rsp->resp.len = len;
987 rsp->resp.challenge_serial = csp->hdr.serial;
988 rxrpc_post_response(conn, response);
989 response = NULL;
990 ret = 0;
991
992 error:
993 rxrpc_free_skb(response, rxrpc_skb_put_response);
994 rxgk_put(gk);
995 _leave(" = %d", ret);
996 return ret;
997 }
998
999 /*
1000 * Respond to a challenge packet.
1001 */
1002 static int rxgk_respond_to_challenge(struct rxrpc_connection *conn,
1003 struct sk_buff *challenge,
1004 struct krb5_buffer *appdata)
1005 {
1006 _enter("{%d,%x}", conn->debug_id, key_serial(conn->key));
1007
1008 if (key_validate(conn->key) < 0)
1009 return rxrpc_abort_conn(conn, NULL, RXGK_EXPIRED, -EPROTO,
1010 rxgk_abort_chall_key_expired);
1011
1012 return rxgk_construct_response(conn, challenge, appdata);
1013 }
1014
1015 static int rxgk_respond_to_challenge_no_appdata(struct rxrpc_connection *conn,
1016 struct sk_buff *challenge)
1017 {
1018 struct krb5_buffer appdata = {};
1019
1020 return rxgk_respond_to_challenge(conn, challenge, &appdata);
1021 }
1022
1023 /**
1024 * rxgk_kernel_respond_to_challenge - Respond to a challenge with appdata
1025 * @challenge: The challenge to respond to
1026 * @appdata: The application data to include in the RESPONSE authenticator
1027 *
1028 * Allow a kernel application to respond to a CHALLENGE with application data
1029 * to be included in the RxGK RESPONSE Authenticator.
1030 *
1031 * Return: %0 if successful and a negative error code otherwise.
1032 */
1033 int rxgk_kernel_respond_to_challenge(struct sk_buff *challenge,
1034 struct krb5_buffer *appdata)
1035 {
1036 struct rxrpc_skb_priv *csp = rxrpc_skb(challenge);
1037
1038 return rxgk_respond_to_challenge(csp->chall.conn, challenge, appdata);
1039 }
1040 EXPORT_SYMBOL(rxgk_kernel_respond_to_challenge);
1041
1042 /*
1043 * Parse sendmsg() control message and respond to challenge. We need to see if
1044 * there's an appdata to fish out.
1045 */
1046 static int rxgk_sendmsg_respond_to_challenge(struct sk_buff *challenge,
1047 struct msghdr *msg)
1048 {
1049 struct krb5_buffer appdata = {};
1050 struct cmsghdr *cmsg;
1051
1052 for_each_cmsghdr(cmsg, msg) {
1053 if (cmsg->cmsg_level != SOL_RXRPC ||
1054 cmsg->cmsg_type != RXRPC_RESP_RXGK_APPDATA)
1055 continue;
1056 if (appdata.data)
1057 return -EINVAL;
1058 appdata.data = CMSG_DATA(cmsg);
1059 appdata.len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1060 }
1061
1062 return rxgk_kernel_respond_to_challenge(challenge, &appdata);
1063 }
1064
1065 /*
1066 * Verify the authenticator.
1067 *
1068 * struct RXGK_Authenticator {
1069 * opaque nonce[20];
1070 * opaque appdata<>;
1071 * RXGK_Level level;
1072 * unsigned int epoch;
1073 * unsigned int cid;
1074 * unsigned int call_numbers<>;
1075 * };
1076 */
1077 static int rxgk_do_verify_authenticator(struct rxrpc_connection *conn,
1078 const struct krb5_enctype *krb5,
1079 struct sk_buff *skb,
1080 __be32 *p, __be32 *end)
1081 {
1082 u32 app_len, call_count, level, epoch, cid, i;
1083
1084 _enter("");
1085
1086 if (memcmp(p, conn->rxgk.nonce, 20) != 0)
1087 return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
1088 rxgk_abort_resp_bad_nonce);
1089 p += 20 / sizeof(__be32);
1090
1091 app_len = ntohl(*p++);
1092 if (app_len > (end - p) * sizeof(__be32))
1093 return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
1094 rxgk_abort_resp_short_applen);
1095
1096 p += xdr_round_up(app_len) / sizeof(__be32);
1097 if (end - p < 4)
1098 return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
1099 rxgk_abort_resp_short_applen);
1100
1101 level = ntohl(*p++);
1102 epoch = ntohl(*p++);
1103 cid = ntohl(*p++);
1104 call_count = ntohl(*p++);
1105
1106 if (level != conn->security_level ||
1107 epoch != conn->proto.epoch ||
1108 cid != conn->proto.cid ||
1109 call_count > 4)
1110 return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
1111 rxgk_abort_resp_bad_param);
1112
1113 if (end - p < call_count)
1114 return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
1115 rxgk_abort_resp_short_call_list);
1116
1117 for (i = 0; i < call_count; i++) {
1118 u32 call_id = ntohl(*p++);
1119
1120 if (call_id > INT_MAX)
1121 return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
1122 rxgk_abort_resp_bad_callid);
1123
1124 if (call_id < conn->channels[i].call_counter)
1125 return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
1126 rxgk_abort_resp_call_ctr);
1127
1128 if (call_id > conn->channels[i].call_counter) {
1129 if (conn->channels[i].call)
1130 return rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
1131 rxgk_abort_resp_call_state);
1132
1133 conn->channels[i].call_counter = call_id;
1134 }
1135 }
1136
1137 _leave(" = 0");
1138 return 0;
1139 }
1140
1141 /*
1142 * Extract the authenticator and verify it.
1143 */
1144 static int rxgk_verify_authenticator(struct rxrpc_connection *conn,
1145 const struct krb5_enctype *krb5,
1146 struct sk_buff *skb,
1147 unsigned int auth_offset, unsigned int auth_len)
1148 {
1149 void *auth;
1150 __be32 *p;
1151 int ret;
1152
1153 auth = kmalloc(auth_len, GFP_NOFS);
1154 if (!auth)
1155 return -ENOMEM;
1156
1157 ret = skb_copy_bits(skb, auth_offset, auth, auth_len);
1158 if (ret < 0) {
1159 ret = rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EPROTO,
1160 rxgk_abort_resp_short_auth);
1161 goto error;
1162 }
1163
1164 p = auth;
1165 ret = rxgk_do_verify_authenticator(conn, krb5, skb, p, p + auth_len);
1166 error:
1167 kfree(auth);
1168 return ret;
1169 }
1170
1171 /*
1172 * Verify a response.
1173 *
1174 * struct RXGK_Response {
1175 * rxgkTime start_time;
1176 * RXGK_Data token;
1177 * opaque authenticator<RXGK_MAXAUTHENTICATOR>
1178 * };
1179 */
1180 static int rxgk_verify_response(struct rxrpc_connection *conn,
1181 struct sk_buff *skb)
1182 {
1183 const struct krb5_enctype *krb5;
1184 struct rxrpc_key_token *token;
1185 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
1186 struct rxgk_response rhdr;
1187 struct rxgk_context *gk;
1188 struct key *key = NULL;
1189 unsigned int offset = sizeof(struct rxrpc_wire_header);
1190 unsigned int len = skb->len - sizeof(struct rxrpc_wire_header);
1191 unsigned int token_offset, token_len;
1192 unsigned int auth_offset, auth_len;
1193 __be32 xauth_len;
1194 int ret, ec;
1195
1196 _enter("{%d}", conn->debug_id);
1197
1198 /* Parse the RXGK_Response object */
1199 if (sizeof(rhdr) + sizeof(__be32) > len)
1200 goto short_packet;
1201
1202 if (skb_copy_bits(skb, offset, &rhdr, sizeof(rhdr)) < 0)
1203 goto short_packet;
1204 offset += sizeof(rhdr);
1205 len -= sizeof(rhdr);
1206
1207 token_offset = offset;
1208 token_len = ntohl(rhdr.token_len);
1209 if (xdr_round_up(token_len) + sizeof(__be32) > len)
1210 goto short_packet;
1211
1212 trace_rxrpc_rx_response(conn, sp->hdr.serial, 0, sp->hdr.cksum, token_len);
1213
1214 offset += xdr_round_up(token_len);
1215 len -= xdr_round_up(token_len);
1216
1217 if (skb_copy_bits(skb, offset, &xauth_len, sizeof(xauth_len)) < 0)
1218 goto short_packet;
1219 offset += sizeof(xauth_len);
1220 len -= sizeof(xauth_len);
1221
1222 auth_offset = offset;
1223 auth_len = ntohl(xauth_len);
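	/* The authenticator must be XDR-aligned and at least large enough for
	 * the 20-byte nonce plus nine further XDR words (empty appdata length,
	 * level, epoch, cid, count and four call numbers).
	 */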
1224 if (auth_len > len)
1225 goto short_packet;
1226 if (auth_len & 3)
1227 goto inconsistent;
1228 if (auth_len < 20 + 9 * 4)
1229 goto auth_too_short;
1230
1231 /* We need to extract and decrypt the token and instantiate a session
1232 * key for it. This bit, however, is application-specific. If
1233 * possible, we use a default parser, but we might end up bumping this
1234 * to the app to deal with - which might mean a round trip to
1235 * userspace.
1236 */
1237 ret = rxgk_extract_token(conn, skb, token_offset, token_len, &key);
1238 if (ret < 0)
1239 goto out;
1240
1241 /* We now have a key instantiated from the decrypted ticket. We can
1242 * pass this to the application so that they can parse the ticket
1243 * content and we can use the session key it contains to derive the
1244 * keys we need.
1245 *
1246 * Note that we have to switch enctype at this point as the enctype of
1247 * the ticket doesn't necessarily match that of the transport.
1248 */
1249 token = key->payload.data[0];
1250 conn->security_level = token->rxgk->level;
1251 conn->rxgk.start_time = __be64_to_cpu(rhdr.start_time);
1252
1253 gk = rxgk_generate_transport_key(conn, token->rxgk, sp->hdr.cksum, GFP_NOFS);
1254 if (IS_ERR(gk)) {
1255 ret = PTR_ERR(gk);
1256 goto cant_get_token;
1257 }
1258
1259 krb5 = gk->krb5;
1260
1261 trace_rxrpc_rx_response(conn, sp->hdr.serial, krb5->etype, sp->hdr.cksum, token_len);
1262
1263 /* Decrypt, parse and verify the authenticator. */
1264 ret = rxgk_decrypt_skb(krb5, gk->resp_enc, skb,
1265 &auth_offset, &auth_len, &ec);
1266 if (ret < 0) {
1267 rxrpc_abort_conn(conn, skb, RXGK_SEALEDINCON, ret,
1268 rxgk_abort_resp_auth_dec);
1269 goto out;
1270 }
1271
1272 ret = rxgk_verify_authenticator(conn, krb5, skb, auth_offset, auth_len);
1273 if (ret < 0)
1274 goto out;
1275
1276 conn->key = key;
1277 key = NULL;
1278 ret = 0;
1279 out:
1280 key_put(key);
1281 _leave(" = %d", ret);
1282 return ret;
1283
1284 inconsistent:
1285 ret = rxrpc_abort_conn(conn, skb, RXGK_INCONSISTENCY, -EPROTO,
1286 rxgk_abort_resp_xdr_align);
1287 goto out;
1288 auth_too_short:
1289 ret = rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO,
1290 rxgk_abort_resp_short_auth);
1291 goto out;
1292 short_packet:
1293 ret = rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO,
1294 rxgk_abort_resp_short_packet);
1295 goto out;
1296
1297 cant_get_token:
1298 switch (ret) {
1299 case -ENOMEM:
1300 goto temporary_error;
1301 case -EINVAL:
1302 ret = rxrpc_abort_conn(conn, skb, RXGK_NOTAUTH, -EKEYREJECTED,
1303 rxgk_abort_resp_internal_error);
1304 goto out;
1305 case -ENOPKG:
1306 ret = rxrpc_abort_conn(conn, skb, KRB5_PROG_KEYTYPE_NOSUPP,
1307 -EKEYREJECTED, rxgk_abort_resp_nopkg);
1308 goto out;
1309 }
1310
1311 temporary_error:
1312 /* Ignore the response packet if we got a temporary error such as
1313 * ENOMEM. We just want to send the challenge again. Note that we
1314 * also come out this way if the ticket decryption fails.
1315 */
1316 goto out;
1317 }
1318
1319 /*
1320 * clear the connection security
1321 */
1322 static void rxgk_clear(struct rxrpc_connection *conn)
1323 {
1324 int i;
1325
1326 for (i = 0; i < ARRAY_SIZE(conn->rxgk.keys); i++)
1327 rxgk_put(conn->rxgk.keys[i]);
1328 }
1329
1330 /*
1331 * Initialise the RxGK security service.
1332 */
1333 static int rxgk_init(void)
1334 {
1335 return 0;
1336 }
1337
1338 /*
1339 * Clean up the RxGK security service.
1340 */
1341 static void rxgk_exit(void)
1342 {
1343 }
1344
1345 /*
1346 * RxRPC YFS GSSAPI-based security
1347 */
1348 const struct rxrpc_security rxgk_yfs = {
1349 .name = "yfs-rxgk",
1350 .security_index = RXRPC_SECURITY_YFS_RXGK,
1351 .no_key_abort = RXGK_NOTAUTH,
1352 .init = rxgk_init,
1353 .exit = rxgk_exit,
1354 .preparse_server_key = rxgk_preparse_server_key,
1355 .free_preparse_server_key = rxgk_free_preparse_server_key,
1356 .destroy_server_key = rxgk_destroy_server_key,
1357 .describe_server_key = rxgk_describe_server_key,
1358 .init_connection_security = rxgk_init_connection_security,
1359 .alloc_txbuf = rxgk_alloc_txbuf,
1360 .secure_packet = rxgk_secure_packet,
1361 .verify_packet = rxgk_verify_packet,
1362 .free_call_crypto = rxgk_free_call_crypto,
1363 .issue_challenge = rxgk_issue_challenge,
1364 .validate_challenge = rxgk_validate_challenge,
1365 .challenge_to_recvmsg = rxgk_challenge_to_recvmsg,
1366 .sendmsg_respond_to_challenge = rxgk_sendmsg_respond_to_challenge,
1367 .respond_to_challenge = rxgk_respond_to_challenge_no_appdata,
1368 .verify_response = rxgk_verify_response,
1369 .clear = rxgk_clear,
1370 .default_decode_ticket = rxgk_yfs_decode_ticket,
1371 };
1372