Lines Matching +full:rx +full:- +full:tx
1 // SPDX-License-Identifier: GPL-2.0
75 STAT_BADKEYS, /* tx only */
76 STAT_BADMSGS = STAT_BADKEYS, /* rx only */
94 * struct tipc_key - TIPC keys' status indicator
97 * +-----+-----+-----+-----+-----+-----+-----+-----+
99 * +-----+-----+-----+-----+-----+-----+-----+-----+
103 #define KEY_MASK ((1 << KEY_BITS) - 1)
109 passive:2, /* rx only */
113 passive:2, /* rx only */
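The three slot indices (passive/active/pending) are 2-bit fields sharing a single byte, so a whole key state can be read or swapped in one access via `key.keys`. A minimal userspace sketch of the packing, mirroring the composition done by tipc_crypto_key_set_state() further down (KEY_BITS assumed to be 2, as the mask and the diagram imply):

```c
#include <stdint.h>
#include <stdio.h>

#define KEY_BITS 2
#define KEY_MASK ((1 << KEY_BITS) - 1)

/* Pack the three 2-bit slot indices into one byte:
 * bits 5:4 = passive, 3:2 = active, 1:0 = pending.
 */
static uint8_t key_pack(uint8_t passive, uint8_t active, uint8_t pending)
{
	return ((passive & KEY_MASK) << (KEY_BITS * 2)) |
	       ((active & KEY_MASK) << KEY_BITS) |
	       (pending & KEY_MASK);
}

int main(void)
{
	uint8_t keys = key_pack(3, 1, 2);

	printf("passive=%u active=%u pending=%u\n",
	       (keys >> (KEY_BITS * 2)) & KEY_MASK,
	       (keys >> KEY_BITS) & KEY_MASK,
	       keys & KEY_MASK);
	return 0;
}
```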
125 * struct tipc_tfm - TIPC TFM structure to form a list of TFMs
133 * struct tipc_aead - TIPC AEAD key structure
134 * @tfm_entry: per-cpu pointer to one entry in TFM list
137  * @users: the number of key users (TX/RX)
168 * struct tipc_crypto_stats - TIPC Crypto statistics
175 * struct tipc_crypto - TIPC TX/RX crypto structure
177 * @node: TIPC node (RX)
179 * @peer_rx_active: replicated peer RX active key index
180 * @key_gen: TX/RX key generation
184 * @wq: common workqueue on TX crypto
185 * @work: delayed work sched for TX/RX
190 * @sndnxt: the per-peer sndnxt (TX)
234 /* struct tipc_crypto_tx_ctx - TX context for callbacks */
241 /* struct tipc_crypto_rx_ctx - RX context for callbacks */
281 static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending);
282 static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
283 struct tipc_crypto *rx,
286 static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb);
301 static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr);
306 #define is_tx(crypto) (!(crypto)->node)
326  * tipc_aead_key_validate - Validate an AEAD user key
333 if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) { in tipc_aead_key_validate()
335 return -ENODEV; in tipc_aead_key_validate()
339 if (strcmp(ukey->alg_name, "gcm(aes)")) { in tipc_aead_key_validate()
341 return -ENOTSUPP; in tipc_aead_key_validate()
345 keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE; in tipc_aead_key_validate()
350 return -EKEYREJECTED; in tipc_aead_key_validate()
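The user key blob is the AES key followed by a short salt, so validation subtracts the salt before checking for a standard AES key size. A hedged sketch of that length check (the 4-byte salt and the 16/24/32-byte AES sizes are assumptions taken from standard AES-GCM usage):

```c
#define TIPC_AES_GCM_SALT_SIZE 4 /* assumed, as in RFC 4106 style GCM */

/* Accept only [AES-KEY][SALT] blobs whose AES part is 128/192/256-bit */
static int aes_gcm_keylen_ok(unsigned int ukey_len)
{
	unsigned int keylen;

	if (ukey_len <= TIPC_AES_GCM_SALT_SIZE)
		return 0;
	keylen = ukey_len - TIPC_AES_GCM_SALT_SIZE;
	return keylen == 16 || keylen == 24 || keylen == 32;
}
```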
357 * tipc_aead_key_generate - Generate new session key
369 rc = crypto_rng_get_bytes(crypto_default_rng, skey->key, in tipc_aead_key_generate()
370 skey->keylen); in tipc_aead_key_generate()
383 if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt))) in tipc_aead_get()
392 if (aead && refcount_dec_and_test(&aead->refcnt)) in tipc_aead_put()
393 call_rcu(&aead->rcu, tipc_aead_free); in tipc_aead_put()
397 * tipc_aead_free - Release AEAD key incl. all the TFMs in the list
405 if (aead->cloned) { in tipc_aead_free()
406 tipc_aead_put(aead->cloned); in tipc_aead_free()
408 head = *get_cpu_ptr(aead->tfm_entry); in tipc_aead_free()
409 put_cpu_ptr(aead->tfm_entry); in tipc_aead_free()
410 list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) { in tipc_aead_free()
411 crypto_free_aead(tfm_entry->tfm); in tipc_aead_free()
412 list_del(&tfm_entry->list); in tipc_aead_free()
416 crypto_free_aead(head->tfm); in tipc_aead_free()
417 list_del(&head->list); in tipc_aead_free()
420 free_percpu(aead->tfm_entry); in tipc_aead_free()
421 kfree_sensitive(aead->key); in tipc_aead_free()
433 users = atomic_read(&tmp->users); in tipc_aead_users()
446 atomic_add_unless(&tmp->users, 1, lim); in tipc_aead_users_inc()
457 atomic_add_unless(&rcu_dereference(aead)->users, -1, lim); in tipc_aead_users_dec()
470 cur = atomic_read(&tmp->users); in tipc_aead_users_set()
473 } while (atomic_cmpxchg(&tmp->users, cur, val) != cur); in tipc_aead_users_set()
479 * tipc_aead_tfm_next - Move TFM entry to the next one in list and return it
486 tfm_entry = get_cpu_ptr(aead->tfm_entry); in tipc_aead_tfm_next()
488 tfm = (*tfm_entry)->tfm; in tipc_aead_tfm_next()
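Each CPU keeps its own cursor into a circular list of TFM handles and advances it on every use, spreading parallel crypto requests across the allocated transforms. A simplified, single-cursor model of that rotation (the kernel version uses a per-cpu pointer and the kernel list API):

```c
struct tfm_node {
	struct tfm_node *next; /* circular list of crypto transforms */
	void *tfm;
};

/* Advance the cursor and return the transform it now points at */
static void *tfm_next(struct tfm_node **cursor)
{
	*cursor = (*cursor)->next;
	return (*cursor)->tfm;
}
```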
495 * tipc_aead_init - Initiate TIPC AEAD
517 return -EEXIST; in tipc_aead_init()
522 return -ENOMEM; in tipc_aead_init()
524 /* The key consists of two parts: [AES-KEY][SALT] */ in tipc_aead_init()
525 keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE; in tipc_aead_init()
527 /* Allocate per-cpu TFM entry pointer */ in tipc_aead_init()
528 tmp->tfm_entry = alloc_percpu(struct tipc_tfm *); in tipc_aead_init()
529 if (!tmp->tfm_entry) { in tipc_aead_init()
531 return -ENOMEM; in tipc_aead_init()
536 tfm = crypto_alloc_aead(ukey->alg_name, 0, 0); in tipc_aead_init()
545 err = -ENOTSUPP; in tipc_aead_init()
550 err |= crypto_aead_setkey(tfm, ukey->key, keylen); in tipc_aead_init()
559 err = -ENOMEM; in tipc_aead_init()
562 INIT_LIST_HEAD(&tfm_entry->list); in tipc_aead_init()
563 tfm_entry->tfm = tfm; in tipc_aead_init()
569 *per_cpu_ptr(tmp->tfm_entry, cpu) = head; in tipc_aead_init()
572 list_add_tail(&tfm_entry->list, &head->list); in tipc_aead_init()
579 free_percpu(tmp->tfm_entry); in tipc_aead_init()
585 bin2hex(tmp->hint, ukey->key + keylen - TIPC_AEAD_HINT_LEN, in tipc_aead_init()
589 tmp->mode = mode; in tipc_aead_init()
590 tmp->cloned = NULL; in tipc_aead_init()
591 tmp->authsize = TIPC_AES_GCM_TAG_SIZE; in tipc_aead_init()
592 tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL); in tipc_aead_init()
593 if (!tmp->key) { in tipc_aead_init()
594 tipc_aead_free(&tmp->rcu); in tipc_aead_init()
595 return -ENOMEM; in tipc_aead_init()
597 memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE); in tipc_aead_init()
598 atomic_set(&tmp->users, 0); in tipc_aead_init()
599 atomic64_set(&tmp->seqno, 0); in tipc_aead_init()
600 refcount_set(&tmp->refcnt, 1); in tipc_aead_init()
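For display purposes the key gets a short printable "hint": the hex form of the last few bytes of the AES key proper, taken just before the trailing salt (see the bin2hex() call above). A sketch of that derivation; the hint length here is illustrative, the kernel uses TIPC_AEAD_HINT_LEN:

```c
#include <stdio.h>
#include <stdint.h>

#define HINT_LEN 4 /* illustrative; the real length is TIPC_AEAD_HINT_LEN */

/* Hex-encode the last HINT_LEN bytes of the AES key (salt excluded) */
static void key_hint(char out[2 * HINT_LEN + 1],
		     const uint8_t *key, unsigned int keylen)
{
	const uint8_t *p = key + keylen - HINT_LEN;

	for (unsigned int i = 0; i < HINT_LEN; i++)
		sprintf(out + 2 * i, "%02x", p[i]);
}
```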
607 * tipc_aead_clone - Clone a TIPC AEAD key
616 * Note: this must be done in cluster-key mode only!
625 return -ENOKEY; in tipc_aead_clone()
627 if (src->mode != CLUSTER_KEY) in tipc_aead_clone()
628 return -EINVAL; in tipc_aead_clone()
631 return -EEXIST; in tipc_aead_clone()
635 return -ENOMEM; in tipc_aead_clone()
637 aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC); in tipc_aead_clone()
638 if (unlikely(!aead->tfm_entry)) { in tipc_aead_clone()
640 return -ENOMEM; in tipc_aead_clone()
644 *per_cpu_ptr(aead->tfm_entry, cpu) = in tipc_aead_clone()
645 *per_cpu_ptr(src->tfm_entry, cpu); in tipc_aead_clone()
648 memcpy(aead->hint, src->hint, sizeof(src->hint)); in tipc_aead_clone()
649 aead->mode = src->mode; in tipc_aead_clone()
650 aead->salt = src->salt; in tipc_aead_clone()
651 aead->authsize = src->authsize; in tipc_aead_clone()
652 atomic_set(&aead->users, 0); in tipc_aead_clone()
653 atomic64_set(&aead->seqno, 0); in tipc_aead_clone()
654 refcount_set(&aead->refcnt, 1); in tipc_aead_clone()
656 WARN_ON(!refcount_inc_not_zero(&src->refcnt)); in tipc_aead_clone()
657 aead->cloned = src; in tipc_aead_clone()
664 * tipc_aead_mem_alloc - Allocate memory for AEAD request operations
692 len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1); in tipc_aead_mem_alloc()
713 * tipc_aead_encrypt - Encrypt a message
722 * -EINPROGRESS/-EBUSY : if a callback will be performed
741  	/* Make sure message len is at least 4-byte aligned */	 in tipc_aead_encrypt()
742 len = ALIGN(skb->len, 4); in tipc_aead_encrypt()
743 tailen = len - skb->len + aead->authsize; in tipc_aead_encrypt()
753 pr_debug("TX(): skb tailroom is not enough: %d, requires: %d\n", in tipc_aead_encrypt()
769 pr_err("TX: skb_cow_data() returned %d\n", nsg); in tipc_aead_encrypt()
779 return -ENOMEM; in tipc_aead_encrypt()
780 TIPC_SKB_CB(skb)->crypto_ctx = ctx; in tipc_aead_encrypt()
784 rc = skb_to_sgvec(skb, sg, 0, skb->len); in tipc_aead_encrypt()
786 pr_err("TX: skb_to_sgvec() returned %d, nsg %d!\n", rc, nsg); in tipc_aead_encrypt()
791 * In case we're in cluster-key mode, SALT is varied by xor-ing with in tipc_aead_encrypt()
795 ehdr = (struct tipc_ehdr *)skb->data; in tipc_aead_encrypt()
796 salt = aead->salt; in tipc_aead_encrypt()
797 if (aead->mode == CLUSTER_KEY) in tipc_aead_encrypt()
798 salt ^= ehdr->addr; /* __be32 */ in tipc_aead_encrypt()
802 memcpy(iv + 4, (u8 *)&ehdr->seqno, 8); in tipc_aead_encrypt()
808 aead_request_set_crypt(req, sg, sg, len - ehsz, iv); in tipc_aead_encrypt()
814 tx_ctx->aead = aead; in tipc_aead_encrypt()
815 tx_ctx->bearer = b; in tipc_aead_encrypt()
816 memcpy(&tx_ctx->dst, dst, sizeof(*dst)); in tipc_aead_encrypt()
820 rc = -ENODEV; in tipc_aead_encrypt()
826 if (rc == -EINPROGRESS || rc == -EBUSY) in tipc_aead_encrypt()
833 TIPC_SKB_CB(skb)->crypto_ctx = NULL; in tipc_aead_encrypt()
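The encrypt path above assembles the 96-bit GCM IV from the key's 4-byte salt and the 64-bit sequence number carried in the encryption header; with a cluster-wide key the salt is first XOR-ed with the sending node's address so two nodes sharing one key never emit the same (key, IV) pair. A hedged userspace sketch of that construction (byte order simplified; the kernel copies the big-endian header fields directly):

```c
#include <stdint.h>
#include <string.h>

#define GCM_IV_SIZE 12 /* 4-byte salt + 8-byte seqno, standard GCM nonce */

/* Build the AES-GCM IV as in the encrypt path above */
static void build_iv(uint8_t iv[GCM_IV_SIZE], uint32_t salt,
		     uint32_t sender_addr, int cluster_key, uint64_t seqno)
{
	if (cluster_key)
		salt ^= sender_addr; /* make the IV unique per sender */
	memcpy(iv, &salt, 4);
	memcpy(iv + 4, &seqno, 8);
}
```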
839 struct sk_buff *skb = base->data; in tipc_aead_encrypt_done()
840 struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx; in tipc_aead_encrypt_done()
841 struct tipc_bearer *b = tx_ctx->bearer; in tipc_aead_encrypt_done()
842 struct tipc_aead *aead = tx_ctx->aead; in tipc_aead_encrypt_done()
843 struct tipc_crypto *tx = aead->crypto; in tipc_aead_encrypt_done() local
844 struct net *net = tx->net; in tipc_aead_encrypt_done()
848 this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]); in tipc_aead_encrypt_done()
850 if (likely(test_bit(0, &b->up))) in tipc_aead_encrypt_done()
851 b->media->send_msg(net, skb, b, &tx_ctx->dst); in tipc_aead_encrypt_done()
856 case -EINPROGRESS: in tipc_aead_encrypt_done()
859 this_cpu_inc(tx->stats->stat[STAT_ASYNC_NOK]); in tipc_aead_encrypt_done()
870 * tipc_aead_decrypt - Decrypt an encrypted message
878 * -EINPROGRESS/-EBUSY : if a callback will be performed
896 return -ENOKEY; in tipc_aead_decrypt()
900 pr_err("RX: skb_cow_data() returned %d\n", nsg); in tipc_aead_decrypt()
908 return -ENOMEM; in tipc_aead_decrypt()
909 TIPC_SKB_CB(skb)->crypto_ctx = ctx; in tipc_aead_decrypt()
913 rc = skb_to_sgvec(skb, sg, 0, skb->len); in tipc_aead_decrypt()
915 pr_err("RX: skb_to_sgvec() returned %d, nsg %d\n", rc, nsg); in tipc_aead_decrypt()
920 ehdr = (struct tipc_ehdr *)skb->data; in tipc_aead_decrypt()
921 salt = aead->salt; in tipc_aead_decrypt()
922 if (aead->mode == CLUSTER_KEY) in tipc_aead_decrypt()
923 salt ^= ehdr->addr; /* __be32 */ in tipc_aead_decrypt()
924 else if (ehdr->destined) in tipc_aead_decrypt()
927 memcpy(iv + 4, (u8 *)&ehdr->seqno, 8); in tipc_aead_decrypt()
933 aead_request_set_crypt(req, sg, sg, skb->len - ehsz, iv); in tipc_aead_decrypt()
939 rx_ctx->aead = aead; in tipc_aead_decrypt()
940 rx_ctx->bearer = b; in tipc_aead_decrypt()
944 rc = -ENODEV; in tipc_aead_decrypt()
950 if (rc == -EINPROGRESS || rc == -EBUSY) in tipc_aead_decrypt()
957 TIPC_SKB_CB(skb)->crypto_ctx = NULL; in tipc_aead_decrypt()
963 struct sk_buff *skb = base->data; in tipc_aead_decrypt_done()
964 struct tipc_crypto_rx_ctx *rx_ctx = TIPC_SKB_CB(skb)->crypto_ctx; in tipc_aead_decrypt_done()
965 struct tipc_bearer *b = rx_ctx->bearer; in tipc_aead_decrypt_done()
966 struct tipc_aead *aead = rx_ctx->aead; in tipc_aead_decrypt_done()
967 struct tipc_crypto_stats __percpu *stats = aead->crypto->stats; in tipc_aead_decrypt_done()
968 struct net *net = aead->crypto->net; in tipc_aead_decrypt_done()
972 this_cpu_inc(stats->stat[STAT_ASYNC_OK]); in tipc_aead_decrypt_done()
974 case -EINPROGRESS: in tipc_aead_decrypt_done()
977 this_cpu_inc(stats->stat[STAT_ASYNC_NOK]); in tipc_aead_decrypt_done()
984 if (likely(test_bit(0, &b->up))) in tipc_aead_decrypt_done()
995 return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE; in tipc_ehdr_size()
999 * tipc_ehdr_validate - Validate an encryption message
1012 ehdr = (struct tipc_ehdr *)skb->data; in tipc_ehdr_validate()
1013 if (unlikely(ehdr->version != TIPC_EVERSION)) in tipc_ehdr_validate()
1018 if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE)) in tipc_ehdr_validate()
1025 * tipc_ehdr_build - Build TIPC encryption message header
1027 * @aead: TX AEAD key to be used for the message encryption
1030 * @__rx: RX crypto handle if dest is "known"
1051 * cluster key mode, otherwise it's better for a per-peer seqno! in tipc_ehdr_build()
1053 if (!__rx || aead->mode == CLUSTER_KEY) in tipc_ehdr_build()
1054 seqno = atomic64_inc_return(&aead->seqno); in tipc_ehdr_build()
1056 seqno = atomic64_inc_return(&__rx->sndnxt); in tipc_ehdr_build()
1062 /* Word 1-2 */ in tipc_ehdr_build()
1063 ehdr->seqno = cpu_to_be64(seqno); in tipc_ehdr_build()
1065 /* Words 0, 3- */ in tipc_ehdr_build()
1066 ehdr->version = TIPC_EVERSION; in tipc_ehdr_build()
1067 ehdr->user = 0; in tipc_ehdr_build()
1068 ehdr->keepalive = 0; in tipc_ehdr_build()
1069 ehdr->tx_key = tx_key; in tipc_ehdr_build()
1070 ehdr->destined = (__rx) ? 1 : 0; in tipc_ehdr_build()
1071 ehdr->rx_key_active = (__rx) ? __rx->key.active : 0; in tipc_ehdr_build()
1072 ehdr->rx_nokey = (__rx) ? __rx->nokey : 0; in tipc_ehdr_build()
1073 ehdr->master_key = aead->crypto->key_master; in tipc_ehdr_build()
1074 ehdr->reserved_1 = 0; in tipc_ehdr_build()
1075 ehdr->reserved_2 = 0; in tipc_ehdr_build()
1079 ehdr->user = LINK_CONFIG; in tipc_ehdr_build()
1080 memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN); in tipc_ehdr_build()
1084 ehdr->user = LINK_PROTOCOL; in tipc_ehdr_build()
1085 ehdr->keepalive = msg_is_keepalive(hdr); in tipc_ehdr_build()
1087 ehdr->addr = hdr->hdr[3]; in tipc_ehdr_build()
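The sequence number feeding that IV is chosen per key when the key is cluster-wide (or the destination is unknown), and per peer otherwise, exactly as the branch above shows. A minimal model of that choice:

```c
#include <stdint.h>

/* Next header seqno: a per-key counter for cluster keys or unknown
 * destinations, else the per-peer "sndnxt" counter.
 */
static uint64_t next_seqno(int cluster_key, uint64_t *key_seqno,
			   uint64_t *peer_sndnxt)
{
	if (cluster_key || !peer_sndnxt)
		return ++(*key_seqno);
	return ++(*peer_sndnxt);
}
```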
1099 struct tipc_key old = c->key; in tipc_crypto_key_set_state()
1102 c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) | in tipc_crypto_key_set_state()
1106 pr_debug("%s: key changing %s ::%pS\n", c->name, in tipc_crypto_key_set_state()
1107 tipc_key_change_dump(old, c->key, buf), in tipc_crypto_key_set_state()
1112 * tipc_crypto_key_init - Initiate a new user / AEAD key
1136 tipc_aead_free(&aead->rcu); in tipc_crypto_key_init()
1143 * tipc_crypto_key_attach - Attach a new AEAD key to TIPC crypto
1149 * Return: new key id in case of success, otherwise: -EBUSY
1156 int rc = -EBUSY; in tipc_crypto_key_attach()
1159 spin_lock_bh(&c->lock); in tipc_crypto_key_attach()
1160 key = c->key; in tipc_crypto_key_attach()
1168 if (tipc_aead_users(c->aead[key.pending]) > 0) in tipc_crypto_key_attach()
1190 aead->crypto = c; in tipc_crypto_key_attach()
1191 aead->gen = (is_tx(c)) ? ++c->key_gen : c->key_gen; in tipc_crypto_key_attach()
1192 tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock); in tipc_crypto_key_attach()
1193 if (likely(c->key.keys != key.keys)) in tipc_crypto_key_attach()
1196 c->working = 1; in tipc_crypto_key_attach()
1197 c->nokey = 0; in tipc_crypto_key_attach()
1198 c->key_master |= master_key; in tipc_crypto_key_attach()
1202 spin_unlock_bh(&c->lock); in tipc_crypto_key_attach()
1208 struct tipc_crypto *tx, *rx; in tipc_crypto_key_flush() local
1211 spin_lock_bh(&c->lock); in tipc_crypto_key_flush()
1214 rx = c; in tipc_crypto_key_flush()
1215 tx = tipc_net(rx->net)->crypto_tx; in tipc_crypto_key_flush()
1216 if (cancel_delayed_work(&rx->work)) { in tipc_crypto_key_flush()
1217 kfree(rx->skey); in tipc_crypto_key_flush()
1218 rx->skey = NULL; in tipc_crypto_key_flush()
1219 atomic_xchg(&rx->key_distr, 0); in tipc_crypto_key_flush()
1220 tipc_node_put(rx->node); in tipc_crypto_key_flush()
1222 /* RX stopping => decrease TX key users if any */ in tipc_crypto_key_flush()
1223 k = atomic_xchg(&rx->peer_rx_active, 0); in tipc_crypto_key_flush()
1225 tipc_aead_users_dec(tx->aead[k], 0); in tipc_crypto_key_flush()
1226 /* Mark the point TX key users changed */ in tipc_crypto_key_flush()
1227 tx->timer1 = jiffies; in tipc_crypto_key_flush()
1231 c->flags = 0; in tipc_crypto_key_flush()
1234 tipc_crypto_key_detach(c->aead[k], &c->lock); in tipc_crypto_key_flush()
1235 atomic64_set(&c->sndnxt, 0); in tipc_crypto_key_flush()
1236 spin_unlock_bh(&c->lock); in tipc_crypto_key_flush()
1240 * tipc_crypto_key_try_align - Align RX keys if possible
1241 * @rx: RX crypto handle
1242 * @new_pending: new pending slot if aligned (= TX key from peer)
1252 static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending) in tipc_crypto_key_try_align() argument
1260 spin_lock(&rx->lock); in tipc_crypto_key_try_align()
1261 key = rx->key; in tipc_crypto_key_try_align()
1270 if (tipc_aead_users(rx->aead[key.pending]) > 0) in tipc_crypto_key_try_align()
1274 tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock); in tipc_crypto_key_try_align()
1275 if (!refcount_dec_if_one(&tmp1->refcnt)) in tipc_crypto_key_try_align()
1277 rcu_assign_pointer(rx->aead[key.pending], NULL); in tipc_crypto_key_try_align()
1281 tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2, lockdep_is_held(&rx->lock)); in tipc_crypto_key_try_align()
1282 x = (key.passive - key.pending + new_pending) % KEY_MAX; in tipc_crypto_key_try_align()
1286 /* Re-allocate the key(s) */ in tipc_crypto_key_try_align()
1287 tipc_crypto_key_set_state(rx, new_passive, 0, new_pending); in tipc_crypto_key_try_align()
1288 rcu_assign_pointer(rx->aead[new_pending], tmp1); in tipc_crypto_key_try_align()
1290 rcu_assign_pointer(rx->aead[new_passive], tmp2); in tipc_crypto_key_try_align()
1291 refcount_set(&tmp1->refcnt, 1); in tipc_crypto_key_try_align()
1293 pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending, in tipc_crypto_key_try_align()
1297 spin_unlock(&rx->lock); in tipc_crypto_key_try_align()
1302 * tipc_crypto_key_pick_tx - Pick one TX key for message decryption
1303 * @tx: TX crypto handle
1304 * @rx: RX crypto handle (can be NULL)
1306 * @tx_key: peer TX key id
1308  * This function looks up the existing TX keys and picks one that is suitable	 in tipc_crypto_key_pick_tx()
1312 * Return: the TX AEAD key handle in case of success, otherwise NULL
1314 static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx, in tipc_crypto_key_pick_tx() argument
1315 struct tipc_crypto *rx, in tipc_crypto_key_pick_tx() argument
1321 struct tipc_key key = tx->key; in tipc_crypto_key_pick_tx()
1325 if (!skb_cb->tx_clone_deferred) { in tipc_crypto_key_pick_tx()
1326 skb_cb->tx_clone_deferred = 1; in tipc_crypto_key_pick_tx()
1327 memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx)); in tipc_crypto_key_pick_tx()
1330 skb_cb->tx_clone_ctx.rx = rx; in tipc_crypto_key_pick_tx()
1331 if (++skb_cb->tx_clone_ctx.recurs > 2) in tipc_crypto_key_pick_tx()
1334 /* Pick one TX key */ in tipc_crypto_key_pick_tx()
1335 spin_lock(&tx->lock); in tipc_crypto_key_pick_tx()
1337 aead = tipc_aead_rcu_ptr(tx->aead[KEY_MASTER], &tx->lock); in tipc_crypto_key_pick_tx()
1345 aead = tipc_aead_rcu_ptr(tx->aead[k], &tx->lock); in tipc_crypto_key_pick_tx()
1348 if (aead->mode != CLUSTER_KEY || in tipc_crypto_key_pick_tx()
1349 aead == skb_cb->tx_clone_ctx.last) { in tipc_crypto_key_pick_tx()
1354 skb_cb->tx_clone_ctx.last = aead; in tipc_crypto_key_pick_tx()
1355 WARN_ON(skb->next); in tipc_crypto_key_pick_tx()
1356 skb->next = skb_clone(skb, GFP_ATOMIC); in tipc_crypto_key_pick_tx()
1357 if (unlikely(!skb->next)) in tipc_crypto_key_pick_tx()
1364 WARN_ON(!refcount_inc_not_zero(&aead->refcnt)); in tipc_crypto_key_pick_tx()
1365 spin_unlock(&tx->lock); in tipc_crypto_key_pick_tx()
1372 * @rx: RX crypto handle
1375 * This function updates the peer node related data as the peer RX active key
1376  * has changed, so the number of TX keys' users on this node is increased and	 in tipc_crypto_key_synch()
1383 * The "per-peer" sndnxt is also reset when the peer key has switched.
1385 static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb) in tipc_crypto_key_synch() argument
1388 struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; in tipc_crypto_key_synch() local
1390 u32 self = tipc_own_addr(rx->net); in tipc_crypto_key_synch()
1394 /* Update RX 'key_master' flag according to peer, also mark "legacy" if in tipc_crypto_key_synch()
1397 rx->key_master = ehdr->master_key; in tipc_crypto_key_synch()
1398 if (!rx->key_master) in tipc_crypto_key_synch()
1399 tx->legacy_user = 1; in tipc_crypto_key_synch()
1402 if (!ehdr->destined || msg_short(hdr) || msg_destnode(hdr) != self) in tipc_crypto_key_synch()
1406 if (ehdr->rx_nokey) { in tipc_crypto_key_synch()
1408 tx->timer2 = jiffies; in tipc_crypto_key_synch()
1410 if (tx->key.keys && in tipc_crypto_key_synch()
1411 !atomic_cmpxchg(&rx->key_distr, 0, KEY_DISTR_SCHED)) { in tipc_crypto_key_synch()
1415 if (queue_delayed_work(tx->wq, &rx->work, delay)) in tipc_crypto_key_synch()
1416 tipc_node_get(rx->node); in tipc_crypto_key_synch()
1420 atomic_xchg(&rx->key_distr, 0); in tipc_crypto_key_synch()
1423 /* Case 2: Peer RX active key has changed, let's update own TX users */ in tipc_crypto_key_synch()
1424 cur = atomic_read(&rx->peer_rx_active); in tipc_crypto_key_synch()
1425 new = ehdr->rx_key_active; in tipc_crypto_key_synch()
1426 if (tx->key.keys && in tipc_crypto_key_synch()
1428 atomic_cmpxchg(&rx->peer_rx_active, cur, new) == cur) { in tipc_crypto_key_synch()
1430 tipc_aead_users_inc(tx->aead[new], INT_MAX); in tipc_crypto_key_synch()
1432 tipc_aead_users_dec(tx->aead[cur], 0); in tipc_crypto_key_synch()
1434 atomic64_set(&rx->sndnxt, 0); in tipc_crypto_key_synch()
1435 /* Mark the point TX key users changed */ in tipc_crypto_key_synch()
1436 tx->timer1 = jiffies; in tipc_crypto_key_synch()
1438 pr_debug("%s: key users changed %d-- %d++, peer %s\n", in tipc_crypto_key_synch()
1439 tx->name, cur, new, rx->name); in tipc_crypto_key_synch()
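Case 2 above swings this node's TX key user counters when the peer reports a different RX active key: the newly active slot gains a user, the previous one loses one, and the race between concurrent receives is resolved with a compare-and-swap. A userspace sketch of that accounting using C11 atomics (slot 0 meaning "no key"):

```c
#include <stdatomic.h>

/* Shift TX key users from the old peer-active slot to the new one */
static void peer_key_switch(atomic_int *peer_rx_active, int new_key,
			    int users[])
{
	int cur = atomic_load(peer_rx_active);

	if (new_key == cur)
		return;
	if (atomic_compare_exchange_strong(peer_rx_active, &cur, new_key)) {
		if (new_key)
			users[new_key]++; /* peer can decrypt with this key */
		if (cur)
			users[cur]--;     /* old key no longer required */
	}
}
```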
1445 struct tipc_crypto *tx = tipc_net(net)->crypto_tx; in tipc_crypto_key_revoke() local
1448 spin_lock(&tx->lock); in tipc_crypto_key_revoke()
1449 key = tx->key; in tipc_crypto_key_revoke()
1453 tipc_crypto_key_set_state(tx, key.passive, 0, key.pending); in tipc_crypto_key_revoke()
1454 tipc_crypto_key_detach(tx->aead[key.active], &tx->lock); in tipc_crypto_key_revoke()
1455 spin_unlock(&tx->lock); in tipc_crypto_key_revoke()
1457 pr_warn("%s: key is revoked\n", tx->name); in tipc_crypto_key_revoke()
1458 return -EKEYREVOKED; in tipc_crypto_key_revoke()
1467 return -EEXIST; in tipc_crypto_start()
1472 return -ENOMEM; in tipc_crypto_start()
1474 /* Allocate workqueue on TX */ in tipc_crypto_start()
1476 c->wq = alloc_ordered_workqueue("tipc_crypto", 0); in tipc_crypto_start()
1477 if (!c->wq) { in tipc_crypto_start()
1479 return -ENOMEM; in tipc_crypto_start()
1484 c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC); in tipc_crypto_start()
1485 if (!c->stats) { in tipc_crypto_start()
1486 if (c->wq) in tipc_crypto_start()
1487 destroy_workqueue(c->wq); in tipc_crypto_start()
1489 return -ENOMEM; in tipc_crypto_start()
1492 c->flags = 0; in tipc_crypto_start()
1493 c->net = net; in tipc_crypto_start()
1494 c->node = node; in tipc_crypto_start()
1495 get_random_bytes(&c->key_gen, 2); in tipc_crypto_start()
1497 atomic_set(&c->key_distr, 0); in tipc_crypto_start()
1498 atomic_set(&c->peer_rx_active, 0); in tipc_crypto_start()
1499 atomic64_set(&c->sndnxt, 0); in tipc_crypto_start()
1500 c->timer1 = jiffies; in tipc_crypto_start()
1501 c->timer2 = jiffies; in tipc_crypto_start()
1502 c->rekeying_intv = TIPC_REKEYING_INTV_DEF; in tipc_crypto_start()
1503 spin_lock_init(&c->lock); in tipc_crypto_start()
1504 scnprintf(c->name, 48, "%s(%s)", (is_rx(c)) ? "RX" : "TX", in tipc_crypto_start()
1505 (is_rx(c)) ? tipc_node_get_id_str(c->node) : in tipc_crypto_start()
1506 tipc_own_id_string(c->net)); in tipc_crypto_start()
1509 INIT_DELAYED_WORK(&c->work, tipc_crypto_work_rx); in tipc_crypto_start()
1511 INIT_DELAYED_WORK(&c->work, tipc_crypto_work_tx); in tipc_crypto_start()
1527 c->rekeying_intv = 0; in tipc_crypto_stop()
1528 cancel_delayed_work_sync(&c->work); in tipc_crypto_stop()
1529 destroy_workqueue(c->wq); in tipc_crypto_stop()
1535 tipc_aead_put(rcu_dereference(c->aead[k])); in tipc_crypto_stop()
1537 pr_debug("%s: has been stopped\n", c->name); in tipc_crypto_stop()
1540 free_percpu(c->stats); in tipc_crypto_stop()
1546 void tipc_crypto_timeout(struct tipc_crypto *rx) in tipc_crypto_timeout() argument
1548 struct tipc_net *tn = tipc_net(rx->net); in tipc_crypto_timeout()
1549 struct tipc_crypto *tx = tn->crypto_tx; in tipc_crypto_timeout() local
1553 /* TX pending: taking all users & stable -> active */ in tipc_crypto_timeout()
1554 spin_lock(&tx->lock); in tipc_crypto_timeout()
1555 key = tx->key; in tipc_crypto_timeout()
1556 if (key.active && tipc_aead_users(tx->aead[key.active]) > 0) in tipc_crypto_timeout()
1558 if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0) in tipc_crypto_timeout()
1560 if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_TIME)) in tipc_crypto_timeout()
1563 tipc_crypto_key_set_state(tx, key.passive, key.pending, 0); in tipc_crypto_timeout()
1565 tipc_crypto_key_detach(tx->aead[key.active], &tx->lock); in tipc_crypto_timeout()
1566 this_cpu_inc(tx->stats->stat[STAT_SWITCHES]); in tipc_crypto_timeout()
1567 pr_info("%s: key[%d] is activated\n", tx->name, key.pending); in tipc_crypto_timeout()
1570 spin_unlock(&tx->lock); in tipc_crypto_timeout()
1572 /* RX pending: having user -> active */ in tipc_crypto_timeout()
1573 spin_lock(&rx->lock); in tipc_crypto_timeout()
1574 key = rx->key; in tipc_crypto_timeout()
1575 if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0) in tipc_crypto_timeout()
1581 rx->timer2 = jiffies; in tipc_crypto_timeout()
1582 tipc_crypto_key_set_state(rx, key.passive, key.active, 0); in tipc_crypto_timeout()
1583 this_cpu_inc(rx->stats->stat[STAT_SWITCHES]); in tipc_crypto_timeout()
1584 pr_info("%s: key[%d] is activated\n", rx->name, key.pending); in tipc_crypto_timeout()
1588 /* RX pending: not working -> remove */ in tipc_crypto_timeout()
1589 if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -10) in tipc_crypto_timeout()
1592 tipc_crypto_key_set_state(rx, key.passive, key.active, 0); in tipc_crypto_timeout()
1593 tipc_crypto_key_detach(rx->aead[key.pending], &rx->lock); in tipc_crypto_timeout()
1594 pr_debug("%s: key[%d] is removed\n", rx->name, key.pending); in tipc_crypto_timeout()
1598 /* RX active: timed out or no user -> pending */ in tipc_crypto_timeout()
1601 if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM) && in tipc_crypto_timeout()
1602 tipc_aead_users(rx->aead[key.active]) > 0) in tipc_crypto_timeout()
1609 rx->timer2 = jiffies; in tipc_crypto_timeout()
1610 tipc_crypto_key_set_state(rx, key.passive, 0, key.pending); in tipc_crypto_timeout()
1611 tipc_aead_users_set(rx->aead[key.pending], 0); in tipc_crypto_timeout()
1612 pr_debug("%s: key[%d] is deactivated\n", rx->name, key.active); in tipc_crypto_timeout()
1616 /* RX passive: outdated or not working -> free */ in tipc_crypto_timeout()
1619 if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM) && in tipc_crypto_timeout()
1620 tipc_aead_users(rx->aead[key.passive]) > -10) in tipc_crypto_timeout()
1623 tipc_crypto_key_set_state(rx, 0, key.active, key.pending); in tipc_crypto_timeout()
1624 tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock); in tipc_crypto_timeout()
1625 pr_debug("%s: key[%d] is freed\n", rx->name, key.passive); in tipc_crypto_timeout()
1628 spin_unlock(&rx->lock); in tipc_crypto_timeout()
1633 if (time_after(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) in tipc_crypto_timeout()
1634 tx->legacy_user = 0; in tipc_crypto_timeout()
1642 tipc_crypto_do_cmd(rx->net, cmd); in tipc_crypto_timeout()
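The timer above drives the key lifecycle. On the RX side, a pending key that has gained users is promoted to active while the previous active key is parked as passive, so in-flight traffic encrypted with the old key can still be decrypted. A simplified model of that promotion (the "RX pending: having user -> active" case):

```c
/* RX-side promotion: keep the old active key around as passive
 * instead of freeing it immediately.
 */
static void rx_promote(unsigned int *passive, unsigned int *active,
		       unsigned int *pending)
{
	if (*active)
		*passive = *active; /* old key kept for late packets */
	*active = *pending;
	*pending = 0;
}
```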
1654 TIPC_SKB_CB(skb)->xmit_type = type; in tipc_crypto_clone_msg()
1657 b->media->send_msg(net, skb, b, dst); in tipc_crypto_clone_msg()
1662 * tipc_crypto_xmit - Build & encrypt TIPC message for xmit
1678 * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made
1679  * -ENOKEY : the encryption has failed due to no key	 in tipc_crypto_xmit()
1680 * -EKEYREVOKED : the encryption has failed due to key revoked
1681 * -ENOMEM : the encryption has failed due to no memory
1689 struct tipc_crypto *tx = tipc_net(net)->crypto_tx; in tipc_crypto_xmit() local
1690 struct tipc_crypto_stats __percpu *stats = tx->stats; in tipc_crypto_xmit()
1692 struct tipc_key key = tx->key; in tipc_crypto_xmit()
1696 int rc = -ENOKEY; in tipc_crypto_xmit()
1700 if (!tx->working) in tipc_crypto_xmit()
1706 if (!tx->key_master && !key.active) in tipc_crypto_xmit()
1708 if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key) in tipc_crypto_xmit()
1710 if (TIPC_SKB_CB(*skb)->xmit_type == SKB_PROBING) { in tipc_crypto_xmit()
1711 pr_debug("%s: probing for key[%d]\n", tx->name, in tipc_crypto_xmit()
1721 if (tx->key_master) { in tipc_crypto_xmit()
1725 if (TIPC_SKB_CB(*skb)->xmit_type == SKB_GRACING) { in tipc_crypto_xmit()
1726 pr_debug("%s: gracing for msg (%d %d)\n", tx->name, in tipc_crypto_xmit()
1733 time_before(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) { in tipc_crypto_xmit()
1734 if (__rx && __rx->key_master && in tipc_crypto_xmit()
1735 !atomic_read(&__rx->peer_rx_active)) in tipc_crypto_xmit()
1738 if (likely(!tx->legacy_user)) in tipc_crypto_xmit()
1755 aead = tipc_aead_get(tx->aead[tx_key]); in tipc_crypto_xmit()
1765 this_cpu_inc(stats->stat[STAT_OK]); in tipc_crypto_xmit()
1767 case -EINPROGRESS: in tipc_crypto_xmit()
1768 case -EBUSY: in tipc_crypto_xmit()
1769 this_cpu_inc(stats->stat[STAT_ASYNC]); in tipc_crypto_xmit()
1773 this_cpu_inc(stats->stat[STAT_NOK]); in tipc_crypto_xmit()
1774 if (rc == -ENOKEY) in tipc_crypto_xmit()
1775 this_cpu_inc(stats->stat[STAT_NOKEYS]); in tipc_crypto_xmit()
1776 else if (rc == -EKEYREVOKED) in tipc_crypto_xmit()
1777 this_cpu_inc(stats->stat[STAT_BADKEYS]); in tipc_crypto_xmit()
1788 * tipc_crypto_rcv - Decrypt an encrypted TIPC message from peer
1790 * @rx: RX crypto handle
1798  * Note: RX key(s) can be re-aligned, or, in case no key is suitable, TX	 in tipc_crypto_rcv()
1799  * cluster key(s) can be taken for decryption (recursively).	 in tipc_crypto_rcv()
1803 * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made
1804 * -ENOKEY : the decryption has failed due to no key
1805 * -EBADMSG : the decryption has failed due to bad message
1806 * -ENOMEM : the decryption has failed due to no memory
1809 int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx, in tipc_crypto_rcv() argument
1812 struct tipc_crypto *tx = tipc_net(net)->crypto_tx; in tipc_crypto_rcv() local
1816 int rc = -ENOKEY; in tipc_crypto_rcv()
1819 tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key; in tipc_crypto_rcv()
1822 * Let's try with TX key (i.e. cluster mode) & verify the skb first! in tipc_crypto_rcv()
1824 if (unlikely(!rx || tx_key == KEY_MASTER)) in tipc_crypto_rcv()
1827 /* Pick RX key according to TX key if any */ in tipc_crypto_rcv()
1828 key = rx->key; in tipc_crypto_rcv()
1833 /* Unknown key, let's try to align RX key(s) */ in tipc_crypto_rcv()
1834 if (tipc_crypto_key_try_align(rx, tx_key)) in tipc_crypto_rcv()
1838 /* No key suitable? Try to pick one from TX... */ in tipc_crypto_rcv()
1839 aead = tipc_crypto_key_pick_tx(tx, rx, *skb, tx_key); in tipc_crypto_rcv()
1847 aead = tipc_aead_get(rx->aead[tx_key]); in tipc_crypto_rcv()
1852 stats = ((rx) ?: tx)->stats; in tipc_crypto_rcv()
1855 this_cpu_inc(stats->stat[STAT_OK]); in tipc_crypto_rcv()
1857 case -EINPROGRESS: in tipc_crypto_rcv()
1858 case -EBUSY: in tipc_crypto_rcv()
1859 this_cpu_inc(stats->stat[STAT_ASYNC]); in tipc_crypto_rcv()
1863 this_cpu_inc(stats->stat[STAT_NOK]); in tipc_crypto_rcv()
1864 if (rc == -ENOKEY) { in tipc_crypto_rcv()
1867 if (rx) { in tipc_crypto_rcv()
1868  			/* Mark rx->nokey only if we don't have a	 in tipc_crypto_rcv()
1873 rx->nokey = !(rx->skey || in tipc_crypto_rcv()
1874 rcu_access_pointer(rx->aead[n])); in tipc_crypto_rcv()
1876 rx->name, rx->nokey, in tipc_crypto_rcv()
1877 tx_key, rx->key.keys); in tipc_crypto_rcv()
1878 tipc_node_put(rx->node); in tipc_crypto_rcv()
1880 this_cpu_inc(stats->stat[STAT_NOKEYS]); in tipc_crypto_rcv()
1882 } else if (rc == -EBADMSG) { in tipc_crypto_rcv()
1883 this_cpu_inc(stats->stat[STAT_BADMSGS]); in tipc_crypto_rcv()
1897 struct tipc_crypto *rx = aead->crypto; in tipc_crypto_rcv_complete() local
1902 /* Is this completed by TX? */ in tipc_crypto_rcv_complete()
1903 if (unlikely(is_tx(aead->crypto))) { in tipc_crypto_rcv_complete()
1904 rx = skb_cb->tx_clone_ctx.rx; in tipc_crypto_rcv_complete()
1905 pr_debug("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n", in tipc_crypto_rcv_complete()
1906 (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead, in tipc_crypto_rcv_complete()
1907 (*skb)->next, skb_cb->flags); in tipc_crypto_rcv_complete()
1908 pr_debug("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n", in tipc_crypto_rcv_complete()
1909 skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last, in tipc_crypto_rcv_complete()
1910 aead->crypto->aead[1], aead->crypto->aead[2], in tipc_crypto_rcv_complete()
1911 aead->crypto->aead[3]); in tipc_crypto_rcv_complete()
1913 if (err == -EBADMSG && (*skb)->next) in tipc_crypto_rcv_complete()
1914 tipc_rcv(net, (*skb)->next, b); in tipc_crypto_rcv_complete()
1918 if (likely((*skb)->next)) { in tipc_crypto_rcv_complete()
1919 kfree_skb((*skb)->next); in tipc_crypto_rcv_complete()
1920 (*skb)->next = NULL; in tipc_crypto_rcv_complete()
1922 ehdr = (struct tipc_ehdr *)(*skb)->data; in tipc_crypto_rcv_complete()
1923 if (!rx) { in tipc_crypto_rcv_complete()
1924 WARN_ON(ehdr->user != LINK_CONFIG); in tipc_crypto_rcv_complete()
1925 n = tipc_node_create(net, 0, ehdr->id, 0xffffu, 0, in tipc_crypto_rcv_complete()
1927 rx = tipc_node_crypto_rx(n); in tipc_crypto_rcv_complete()
1928 if (unlikely(!rx)) in tipc_crypto_rcv_complete()
1932 /* Ignore cloning if it was TX master key */ in tipc_crypto_rcv_complete()
1933 if (ehdr->tx_key == KEY_MASTER) in tipc_crypto_rcv_complete()
1937 WARN_ON(!refcount_inc_not_zero(&tmp->refcnt)); in tipc_crypto_rcv_complete()
1938 if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) { in tipc_crypto_rcv_complete()
1939 tipc_aead_free(&tmp->rcu); in tipc_crypto_rcv_complete()
1951 /* Set the RX key's user */ in tipc_crypto_rcv_complete()
1954 /* Mark this point, RX works */ in tipc_crypto_rcv_complete()
1955 rx->timer1 = jiffies; in tipc_crypto_rcv_complete()
1959 ehdr = (struct tipc_ehdr *)(*skb)->data; in tipc_crypto_rcv_complete()
1961 /* Mark this point, RX passive still works */ in tipc_crypto_rcv_complete()
1962 if (rx->key.passive && ehdr->tx_key == rx->key.passive) in tipc_crypto_rcv_complete()
1963 rx->timer2 = jiffies; in tipc_crypto_rcv_complete()
1967 pskb_trim(*skb, (*skb)->len - aead->authsize); in tipc_crypto_rcv_complete()
1976 tipc_crypto_key_synch(rx, *skb); in tipc_crypto_rcv_complete()
1979 skb_cb->decrypted = 1; in tipc_crypto_rcv_complete()
1982 if (likely(!skb_cb->tx_clone_deferred)) in tipc_crypto_rcv_complete()
1984 skb_cb->tx_clone_deferred = 0; in tipc_crypto_rcv_complete()
1985 memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx)); in tipc_crypto_rcv_complete()
1994 if (rx) in tipc_crypto_rcv_complete()
1995 tipc_node_put(rx->node); in tipc_crypto_rcv_complete()
2001 struct tipc_crypto *tx = tn->crypto_tx, *rx; in tipc_crypto_do_cmd() local
2021 pr_info("TX(%7.7s)\n%s", tipc_own_id_string(net), in tipc_crypto_do_cmd()
2022 tipc_crypto_key_dump(tx, buf)); in tipc_crypto_do_cmd()
2025 for (p = tn->node_list.next; p != &tn->node_list; p = p->next) { in tipc_crypto_do_cmd()
2026 rx = tipc_node_crypto_rx_by_list(p); in tipc_crypto_do_cmd()
2027 pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node), in tipc_crypto_do_cmd()
2028 tipc_crypto_key_dump(rx, buf)); in tipc_crypto_do_cmd()
2034 j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]); in tipc_crypto_do_cmd()
2037 memset(buf, '-', 115); in tipc_crypto_do_cmd()
2041 j = scnprintf(buf, 200, "TX(%7.7s) ", tipc_own_id_string(net)); in tipc_crypto_do_cmd()
2044 stat = per_cpu_ptr(tx->stats, cpu)->stat[i]; in tipc_crypto_do_cmd()
2045 j += scnprintf(buf + j, 200 - j, "|%11d ", stat); in tipc_crypto_do_cmd()
2052 for (p = tn->node_list.next; p != &tn->node_list; p = p->next) { in tipc_crypto_do_cmd()
2053 rx = tipc_node_crypto_rx_by_list(p); in tipc_crypto_do_cmd()
2054 j = scnprintf(buf, 200, "RX(%7.7s) ", in tipc_crypto_do_cmd()
2055 tipc_node_get_id_str(rx->node)); in tipc_crypto_do_cmd()
2058 stat = per_cpu_ptr(rx->stats, cpu)->stat[i]; in tipc_crypto_do_cmd()
2059 j += scnprintf(buf + j, 200 - j, "|%11d ", in tipc_crypto_do_cmd()
2073 struct tipc_key key = c->key; in tipc_crypto_key_dump()
2083 c->timer2 + TIPC_TX_GRACE_PERIOD)) in tipc_crypto_key_dump()
2095 s = "-"; in tipc_crypto_key_dump()
2097 i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s); in tipc_crypto_key_dump()
2100 aead = rcu_dereference(c->aead[k]); in tipc_crypto_key_dump()
2102 i += scnprintf(buf + i, 200 - i, in tipc_crypto_key_dump()
2104 aead->hint, in tipc_crypto_key_dump()
2105 (aead->mode == CLUSTER_KEY) ? "c" : "p", in tipc_crypto_key_dump()
2106 atomic_read(&aead->users), in tipc_crypto_key_dump()
2107 refcount_read(&aead->refcnt)); in tipc_crypto_key_dump()
2109 i += scnprintf(buf + i, 200 - i, "\n"); in tipc_crypto_key_dump()
2113 i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n", in tipc_crypto_key_dump()
2114 atomic_read(&c->peer_rx_active)); in tipc_crypto_key_dump()
2126 /* Output format: "[%s %s %s] -> [%s %s %s]", max len = 32 */ in tipc_key_change_dump()
2128 i += scnprintf(buf + i, 32 - i, "["); in tipc_key_change_dump()
2130 if (k == key->passive) in tipc_key_change_dump()
2132 else if (k == key->active) in tipc_key_change_dump()
2134 else if (k == key->pending) in tipc_key_change_dump()
2137 s = "-"; in tipc_key_change_dump()
2138 i += scnprintf(buf + i, 32 - i, in tipc_key_change_dump()
2142 i += scnprintf(buf + i, 32 - i, "] -> "); in tipc_key_change_dump()
2146 i += scnprintf(buf + i, 32 - i, "]"); in tipc_key_change_dump()
2151 * tipc_crypto_msg_rcv - Common 'MSG_CRYPTO' processing point
2157 struct tipc_crypto *rx; in tipc_crypto_msg_rcv() local
2164 rx = tipc_node_crypto_rx_by_addr(net, msg_prevnode(hdr)); in tipc_crypto_msg_rcv()
2165 if (unlikely(!rx)) in tipc_crypto_msg_rcv()
2170 if (tipc_crypto_key_rcv(rx, hdr)) in tipc_crypto_msg_rcv()
2177 tipc_node_put(rx->node); in tipc_crypto_msg_rcv()
2184 * tipc_crypto_key_distr - Distribute a TX key
2185 * @tx: the TX crypto
2191 int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key, in tipc_crypto_key_distr() argument
2196 int rc = -ENOKEY; in tipc_crypto_key_distr()
2203 aead = tipc_aead_get(tx->aead[key]); in tipc_crypto_key_distr()
2205 rc = tipc_crypto_key_xmit(tx->net, aead->key, in tipc_crypto_key_distr()
2206 aead->gen, aead->mode, in tipc_crypto_key_distr()
2217 * tipc_crypto_key_xmit - Send a session key
2225  * as its data section, then transmitted over the uc/bc link.	 in tipc_crypto_key_xmit()
2242 return -ENOMEM; in tipc_crypto_key_xmit()
2252 *((__be32 *)(data + TIPC_AEAD_ALG_NAME)) = htonl(skey->keylen); in tipc_crypto_key_xmit()
2253 memcpy(data, skey->alg_name, TIPC_AEAD_ALG_NAME); in tipc_crypto_key_xmit()
2254 memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->key, in tipc_crypto_key_xmit()
2255 skey->keylen); in tipc_crypto_key_xmit()
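The session key travels in the MSG_CRYPTO body as a fixed-width algorithm name, a big-endian length, then the raw key material, as the three stores above show. A hedged sketch of the packing (the 32-byte name width is the assumed value of TIPC_AEAD_ALG_NAME):

```c
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define ALG_NAME_LEN 32 /* assumed value of TIPC_AEAD_ALG_NAME */

/* Pack [alg_name (32B)][keylen (__be32)][key bytes] as sent on the wire */
static size_t pack_session_key(uint8_t *data, const char *alg,
			       const uint8_t *key, uint32_t keylen)
{
	uint32_t be_len = htonl(keylen);

	memset(data, 0, ALG_NAME_LEN);
	strncpy((char *)data, alg, ALG_NAME_LEN - 1);
	memcpy(data + ALG_NAME_LEN, &be_len, sizeof(be_len));
	memcpy(data + ALG_NAME_LEN + sizeof(be_len), key, keylen);
	return ALG_NAME_LEN + sizeof(be_len) + keylen;
}
```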
2268 * tipc_crypto_key_rcv - Receive a session key
2269 * @rx: the RX crypto
2273  * schedules an RX work to attach the key to the corresponding RX crypto.	 in tipc_crypto_key_rcv()
2278 static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr) in tipc_crypto_key_rcv() argument
2280 struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; in tipc_crypto_key_rcv() local
2289 pr_debug("%s: message data size is too small\n", rx->name); in tipc_crypto_key_rcv()
2298 pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name); in tipc_crypto_key_rcv()
2302 spin_lock(&rx->lock); in tipc_crypto_key_rcv()
2303 if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) { in tipc_crypto_key_rcv()
2304 pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name, in tipc_crypto_key_rcv()
2305 rx->skey, key_gen, rx->key_gen); in tipc_crypto_key_rcv()
2312 pr_err("%s: unable to allocate memory for skey\n", rx->name); in tipc_crypto_key_rcv()
2317 skey->keylen = keylen; in tipc_crypto_key_rcv()
2318 memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME); in tipc_crypto_key_rcv()
2319 memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32), in tipc_crypto_key_rcv()
2320 skey->keylen); in tipc_crypto_key_rcv()
2322 rx->key_gen = key_gen; in tipc_crypto_key_rcv()
2323 rx->skey_mode = msg_key_mode(hdr); in tipc_crypto_key_rcv()
2324 rx->skey = skey; in tipc_crypto_key_rcv()
2325 rx->nokey = 0; in tipc_crypto_key_rcv()
2329 spin_unlock(&rx->lock); in tipc_crypto_key_rcv()
2333 if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0))) in tipc_crypto_key_rcv()
2340 * tipc_crypto_work_rx - Scheduled RX works handler
2341 * @work: the struct RX work
2343  * The function processes the previously scheduled works, i.e. distributing the
2344  * TX key or attaching a received session key on the RX crypto.
2349 struct tipc_crypto *rx = container_of(dwork, struct tipc_crypto, work); in tipc_crypto_work_rx() local
2350 struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx; in tipc_crypto_work_rx() local
2356 /* Case 1: Distribute TX key to peer if scheduled */ in tipc_crypto_work_rx()
2357 if (atomic_cmpxchg(&rx->key_distr, in tipc_crypto_work_rx()
2361 key = tx->key.pending ?: tx->key.active; in tipc_crypto_work_rx()
2362 rc = tipc_crypto_key_distr(tx, key, rx->node); in tipc_crypto_work_rx()
2365 tx->name, key, tipc_node_get_id_str(rx->node), in tipc_crypto_work_rx()
2371 atomic_cmpxchg(&rx->key_distr, KEY_DISTR_COMPL, 0); in tipc_crypto_work_rx()
2375 if (rx->skey) { in tipc_crypto_work_rx()
2376 rc = tipc_crypto_key_init(rx, rx->skey, rx->skey_mode, false); in tipc_crypto_work_rx()
2379 rx->name, rc); in tipc_crypto_work_rx()
2381 case -EBUSY: in tipc_crypto_work_rx()
2382 case -ENOMEM: in tipc_crypto_work_rx()
2388 kfree(rx->skey); in tipc_crypto_work_rx()
2389 rx->skey = NULL; in tipc_crypto_work_rx()
2394 if (resched && queue_delayed_work(tx->wq, &rx->work, delay)) in tipc_crypto_work_rx()
2397 tipc_node_put(rx->node); in tipc_crypto_work_rx()
2401  * tipc_crypto_rekeying_sched - (Re)schedule rekeying, with or without a new interval
2402 * @tx: TX crypto
2406 void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed, in tipc_crypto_rekeying_sched() argument
2416 tx->rekeying_intv = new_intv; in tipc_crypto_rekeying_sched()
2417 cancel_delayed_work_sync(&tx->work); in tipc_crypto_rekeying_sched()
2420 if (tx->rekeying_intv || now) { in tipc_crypto_rekeying_sched()
2421 delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000; in tipc_crypto_rekeying_sched()
2422 queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay)); in tipc_crypto_rekeying_sched()
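The interval is kept in minutes and converted to milliseconds when the delayed work is queued; passing "now" forces an immediate run. A trivial model of the delay computation above:

```c
/* Delay (ms) before the next rekeying run: 0 if forced, else minutes */
static unsigned long rekey_delay_ms(unsigned int intv_minutes, int now)
{
	return now ? 0 : (unsigned long)intv_minutes * 60 * 1000;
}
```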
2427 * tipc_crypto_work_tx - Scheduled TX works handler
2428 * @work: the struct TX work
2432 * TX crypto and finally distributing it to peers. It also re-schedules the
2438 struct tipc_crypto *tx = container_of(dwork, struct tipc_crypto, work); in tipc_crypto_work_tx() local
2440 struct tipc_key key = tx->key; in tipc_crypto_work_tx()
2442 int rc = -ENOMEM; in tipc_crypto_work_tx()
2449 aead = rcu_dereference(tx->aead[key.active ?: KEY_MASTER]); in tipc_crypto_work_tx()
2457 skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC); in tipc_crypto_work_tx()
2463 tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false); in tipc_crypto_work_tx()
2465 rc = tipc_crypto_key_distr(tx, rc, NULL); in tipc_crypto_work_tx()
2470 pr_warn_ratelimited("%s: rekeying returns %d\n", tx->name, rc); in tipc_crypto_work_tx()
2473 /* Re-schedule rekeying if any */ in tipc_crypto_work_tx()
2474 tipc_crypto_rekeying_sched(tx, false, 0); in tipc_crypto_work_tx()