// SPDX-License-Identifier: GPL-2.0
/*
 * net/tipc/crypto.c: TIPC crypto for key handling & packet en/decryption
 *
 * Copyright (c) 2019, Ericsson AB
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/rng.h>
#include "crypto.h"
#include "msg.h"
#include "bcast.h"

#define TIPC_TX_GRACE_PERIOD	msecs_to_jiffies(5000) /* 5s */
#define TIPC_TX_LASTING_TIME	msecs_to_jiffies(10000) /* 10s */
#define TIPC_RX_ACTIVE_LIM	msecs_to_jiffies(3000) /* 3s */
#define TIPC_RX_PASSIVE_LIM	msecs_to_jiffies(15000) /* 15s */

#define TIPC_MAX_TFMS_DEF	10
#define TIPC_MAX_TFMS_LIM	1000

#define TIPC_REKEYING_INTV_DEF	(60 * 24) /* default: 1 day */

/*
 * TIPC Key ids
 */
enum {
	KEY_MASTER = 0,
	KEY_MIN = KEY_MASTER,
	KEY_1 = 1,
	KEY_2,
	KEY_3,
	KEY_MAX = KEY_3,
};

/*
 * TIPC Crypto statistics
 */
enum {
	STAT_OK,
	STAT_NOK,
	STAT_ASYNC,
	STAT_ASYNC_OK,
	STAT_ASYNC_NOK,
	STAT_BADKEYS, /* tx only */
	STAT_BADMSGS = STAT_BADKEYS, /* rx only */
	STAT_NOKEYS,
	STAT_SWITCHES,

	MAX_STATS,
};

/* TIPC crypto statistics' header */
static const char *hstats[MAX_STATS] = {"ok", "nok", "async", "async_ok",
					"async_nok", "badmsgs", "nokeys",
					"switches"};

/* Max TFMs number per key */
int sysctl_tipc_max_tfms __read_mostly = TIPC_MAX_TFMS_DEF;
/* Key exchange switch, default: on */
int sysctl_tipc_key_exchange_enabled __read_mostly = 1;

/*
 * struct tipc_key - TIPC keys' status indicator
 *
 *         7     6     5     4     3     2     1     0
 *      +-----+-----+-----+-----+-----+-----+-----+-----+
 * key: | (reserved)|passive idx| active idx|pending idx|
 *      +-----+-----+-----+-----+-----+-----+-----+-----+
 */
struct tipc_key {
#define KEY_BITS (2)
#define KEY_MASK ((1 << KEY_BITS) - 1)
	union {
		struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
			u8 pending:2,
			   active:2,
			   passive:2, /* rx only */
			   reserved:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
			u8 reserved:2,
			   passive:2, /* rx only */
			   active:2,
			   pending:2;
#else
#error "Please fix <asm/byteorder.h>"
#endif
		} __packed;
		u8 keys;
	};
};
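
/* Editorial sketch (not part of the upstream file): how the three 2-bit
 * slot indices above pack into the single "keys" byte. The demo_*() name
 * is hypothetical; the packing mirrors tipc_crypto_key_set_state() below.
 */
#if 0
static u8 demo_pack_key_state(u8 passive, u8 active, u8 pending)
{
	/* passive in bits 5-4, active in bits 3-2, pending in bits 1-0 */
	return ((passive & KEY_MASK) << (KEY_BITS * 2)) |
	       ((active & KEY_MASK) << KEY_BITS) |
	       (pending & KEY_MASK);
}
/* e.g. passive = 3, active = 1, pending = 2  =>  keys == 0x36 */
#endif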

/**
 * struct tipc_tfm - TIPC TFM structure to form a list of TFMs
 * @tfm: cipher handle/key
 * @list: linked list of TFMs
 */
struct tipc_tfm {
	struct crypto_aead *tfm;
	struct list_head list;
};

/**
 * struct tipc_aead - TIPC AEAD key structure
 * @tfm_entry: per-cpu pointer to one entry in TFM list
 * @crypto: TIPC crypto owns this key
 * @cloned: reference to the source key in case cloning
 * @users: the number of the key users (TX/RX)
 * @salt: the key's SALT value
 * @authsize: authentication tag size (max = 16)
 * @mode: crypto mode is applied to the key
 * @hint: a hint for user key
 * @rcu: struct rcu_head
 * @key: the aead key
 * @gen: the key's generation
 * @seqno: the key seqno (cluster scope)
 * @refcnt: the key reference counter
 */
struct tipc_aead {
#define TIPC_AEAD_HINT_LEN (5)
	struct tipc_tfm * __percpu *tfm_entry;
	struct tipc_crypto *crypto;
	struct tipc_aead *cloned;
	atomic_t users;
	u32 salt;
	u8 authsize;
	u8 mode;
	char hint[2 * TIPC_AEAD_HINT_LEN + 1];
	struct rcu_head rcu;
	struct tipc_aead_key *key;
	u16 gen;

	atomic64_t seqno ____cacheline_aligned;
	refcount_t refcnt ____cacheline_aligned;

} ____cacheline_aligned;

/**
 * struct tipc_crypto_stats - TIPC Crypto statistics
 * @stat: array of crypto statistics
 */
struct tipc_crypto_stats {
	unsigned int stat[MAX_STATS];
};

/**
 * struct tipc_crypto - TIPC TX/RX crypto structure
 * @net: struct net
 * @node: TIPC node (RX)
 * @aead: array of pointers to AEAD keys for encryption/decryption
 * @peer_rx_active: replicated peer RX active key index
 * @key_gen: TX/RX key generation
 * @key: the key states
 * @skey_mode: session key's mode
 * @skey: received session key
 * @wq: common workqueue on TX crypto
 * @work: delayed work sched for TX/RX
 * @key_distr: key distributing state
 * @rekeying_intv: rekeying interval (in minutes)
 * @stats: the crypto statistics
 * @name: the crypto name
 * @sndnxt: the per-peer sndnxt (TX)
 * @timer1: general timer 1 (jiffies)
 * @timer2: general timer 2 (jiffies)
 * @working: the crypto is working or not
 * @key_master: flag indicates if master key exists
 * @legacy_user: flag indicates if a peer joins w/o master key (for bwd comp.)
 * @nokey: no key indication
 * @flags: combined flags field
 * @lock: tipc_key lock
 */
struct tipc_crypto {
	struct net *net;
	struct tipc_node *node;
	struct tipc_aead __rcu *aead[KEY_MAX + 1];
	atomic_t peer_rx_active;
	u16 key_gen;
	struct tipc_key key;
	u8 skey_mode;
	struct tipc_aead_key *skey;
	struct workqueue_struct *wq;
	struct delayed_work work;
#define KEY_DISTR_SCHED	1
#define KEY_DISTR_COMPL	2
	atomic_t key_distr;
	u32 rekeying_intv;

	struct tipc_crypto_stats __percpu *stats;
	char name[48];

	atomic64_t sndnxt ____cacheline_aligned;
	unsigned long timer1;
	unsigned long timer2;
	union {
		struct {
			u8 working:1;
			u8 key_master:1;
			u8 legacy_user:1;
			u8 nokey:1;
		};
		u8 flags;
	};
	spinlock_t lock; /* crypto lock */

} ____cacheline_aligned;

/* struct tipc_crypto_tx_ctx - TX context for callbacks */
struct tipc_crypto_tx_ctx {
	struct tipc_aead *aead;
	struct tipc_bearer *bearer;
	struct tipc_media_addr dst;
};

/* struct tipc_crypto_rx_ctx - RX context for callbacks */
struct tipc_crypto_rx_ctx {
	struct tipc_aead *aead;
	struct tipc_bearer *bearer;
};

static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead);
static inline void tipc_aead_put(struct tipc_aead *aead);
static void tipc_aead_free(struct rcu_head *rp);
static int tipc_aead_users(struct tipc_aead __rcu *aead);
static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim);
static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim);
static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val);
static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead);
static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
			  u8 mode);
static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src);
static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
				 unsigned int crypto_ctx_size,
				 u8 **iv, struct aead_request **req,
				 struct scatterlist **sg, int nsg);
static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
			     struct tipc_bearer *b,
			     struct tipc_media_addr *dst,
			     struct tipc_node *__dnode);
static void tipc_aead_encrypt_done(void *data, int err);
static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
			     struct sk_buff *skb, struct tipc_bearer *b);
static void tipc_aead_decrypt_done(void *data, int err);
static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr);
static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
			   u8 tx_key, struct sk_buff *skb,
			   struct tipc_crypto *__rx);
static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
					     u8 new_passive,
					     u8 new_active,
					     u8 new_pending);
static int tipc_crypto_key_attach(struct tipc_crypto *c,
				  struct tipc_aead *aead, u8 pos,
				  bool master_key);
static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending);
static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
						 struct tipc_crypto *rx,
						 struct sk_buff *skb,
						 u8 tx_key);
static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb);
static int tipc_crypto_key_revoke(struct net *net, u8 tx_key);
static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
					 struct tipc_bearer *b,
					 struct tipc_media_addr *dst,
					 struct tipc_node *__dnode, u8 type);
static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
				     struct tipc_bearer *b,
				     struct sk_buff **skb, int err);
static void tipc_crypto_do_cmd(struct net *net, int cmd);
static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf);
static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
				  char *buf);
static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
				u16 gen, u8 mode, u32 dnode);
static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr);
static void tipc_crypto_work_tx(struct work_struct *work);
static void tipc_crypto_work_rx(struct work_struct *work);
static int tipc_aead_key_generate(struct tipc_aead_key *skey);

#define is_tx(crypto) (!(crypto)->node)
#define is_rx(crypto) (!is_tx(crypto))

#define key_next(cur) ((cur) % KEY_MAX + 1)

#define tipc_aead_rcu_ptr(rcu_ptr, lock)				\
	rcu_dereference_protected((rcu_ptr), lockdep_is_held(lock))

#define tipc_aead_rcu_replace(rcu_ptr, ptr, lock)			\
do {									\
	struct tipc_aead *__tmp = rcu_dereference_protected((rcu_ptr),	\
						lockdep_is_held(lock));	\
	rcu_assign_pointer((rcu_ptr), (ptr));				\
	tipc_aead_put(__tmp);						\
} while (0)

#define tipc_crypto_key_detach(rcu_ptr, lock)				\
	tipc_aead_rcu_replace((rcu_ptr), NULL, lock)
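
/* Editorial note: key_next() cycles through the three user key slots only,
 * never yielding KEY_MASTER (0): key_next(1) == 2, key_next(2) == 3,
 * key_next(3) == 1. A sketch of the cycle, assuming KEY_MAX == 3:
 */
#if 0
	u8 k = KEY_1;

	k = key_next(k);	/* k == 2 */
	k = key_next(k);	/* k == 3 */
	k = key_next(k);	/* wraps back to 1, skipping the master slot */
#endif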

/**
 * tipc_aead_key_validate - Validate an AEAD user key
 * @ukey: pointer to user key data
 * @info: netlink info pointer
 */
int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info)
{
	int keylen;

	/* Check if algorithm exists */
	if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) {
		GENL_SET_ERR_MSG(info, "unable to load the algorithm (module existed?)");
		return -ENODEV;
	}

	/* Currently, we only support the "gcm(aes)" cipher algorithm */
	if (strcmp(ukey->alg_name, "gcm(aes)")) {
		GENL_SET_ERR_MSG(info, "not supported yet the algorithm");
		return -ENOTSUPP;
	}

	/* Check if key size is correct */
	keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
	if (unlikely(keylen != TIPC_AES_GCM_KEY_SIZE_128 &&
		     keylen != TIPC_AES_GCM_KEY_SIZE_192 &&
		     keylen != TIPC_AES_GCM_KEY_SIZE_256)) {
		GENL_SET_ERR_MSG(info, "incorrect key length (20, 28 or 36 octets?)");
		return -EKEYREJECTED;
	}

	return 0;
}
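
/* Editorial sketch (not part of the upstream file): a minimal well-formed
 * user key as tipc_aead_key_validate() expects it -- "gcm(aes)" with a
 * 16-byte AES-128 key followed by the 4-byte salt, i.e. keylen == 20.
 * The demo_build_ukey() helper is hypothetical.
 */
#if 0
static struct tipc_aead_key *demo_build_ukey(void)
{
	unsigned int keylen = TIPC_AES_GCM_KEY_SIZE_128 +
			      TIPC_AES_GCM_SALT_SIZE;
	struct tipc_aead_key *ukey;

	ukey = kzalloc(sizeof(*ukey) + keylen, GFP_KERNEL);
	if (!ukey)
		return NULL;
	strscpy(ukey->alg_name, "gcm(aes)", sizeof(ukey->alg_name));
	ukey->keylen = keylen;			/* [16B AES key][4B salt] */
	get_random_bytes(ukey->key, keylen);	/* cf. tipc_aead_key_generate() */
	return ukey;
}
#endif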

/**
 * tipc_aead_key_generate - Generate new session key
 * @skey: input/output key with new content
 *
 * Return: 0 in case of success, otherwise < 0
 */
static int tipc_aead_key_generate(struct tipc_aead_key *skey)
{
	int rc = 0;

	/* Fill the key's content with a random value via RNG cipher */
	rc = crypto_get_default_rng();
	if (likely(!rc)) {
		rc = crypto_rng_get_bytes(crypto_default_rng, skey->key,
					  skey->keylen);
		crypto_put_default_rng();
	}

	return rc;
}

static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (unlikely(!tmp || !refcount_inc_not_zero(&tmp->refcnt)))
		tmp = NULL;
	rcu_read_unlock();

	return tmp;
}

static inline void tipc_aead_put(struct tipc_aead *aead)
{
	if (aead && refcount_dec_and_test(&aead->refcnt))
		call_rcu(&aead->rcu, tipc_aead_free);
}

/**
 * tipc_aead_free - Release AEAD key incl. all the TFMs in the list
 * @rp: rcu head pointer
 */
static void tipc_aead_free(struct rcu_head *rp)
{
	struct tipc_aead *aead = container_of(rp, struct tipc_aead, rcu);
	struct tipc_tfm *tfm_entry, *head, *tmp;

	if (aead->cloned) {
		tipc_aead_put(aead->cloned);
	} else {
		head = *get_cpu_ptr(aead->tfm_entry);
		put_cpu_ptr(aead->tfm_entry);
		list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) {
			crypto_free_aead(tfm_entry->tfm);
			list_del(&tfm_entry->list);
			kfree(tfm_entry);
		}
		/* Free the head */
		crypto_free_aead(head->tfm);
		list_del(&head->list);
		kfree(head);
	}
	free_percpu(aead->tfm_entry);
	kfree_sensitive(aead->key);
	kfree_sensitive(aead);
}

static int tipc_aead_users(struct tipc_aead __rcu *aead)
{
	struct tipc_aead *tmp;
	int users = 0;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		users = atomic_read(&tmp->users);
	rcu_read_unlock();

	return users;
}

static void tipc_aead_users_inc(struct tipc_aead __rcu *aead, int lim)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		atomic_add_unless(&tmp->users, 1, lim);
	rcu_read_unlock();
}

static void tipc_aead_users_dec(struct tipc_aead __rcu *aead, int lim)
{
	struct tipc_aead *tmp;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp)
		atomic_add_unless(&tmp->users, -1, lim);
	rcu_read_unlock();
}

static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val)
{
	struct tipc_aead *tmp;
	int cur;

	rcu_read_lock();
	tmp = rcu_dereference(aead);
	if (tmp) {
		do {
			cur = atomic_read(&tmp->users);
			if (cur == val)
				break;
		} while (atomic_cmpxchg(&tmp->users, cur, val) != cur);
	}
	rcu_read_unlock();
}

/**
 * tipc_aead_tfm_next - Move TFM entry to the next one in list and return it
 * @aead: the AEAD key pointer
 */
static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead)
{
	struct tipc_tfm **tfm_entry;
	struct crypto_aead *tfm;

	tfm_entry = get_cpu_ptr(aead->tfm_entry);
	*tfm_entry = list_next_entry(*tfm_entry, list);
	tfm = (*tfm_entry)->tfm;
	put_cpu_ptr(tfm_entry);

	return tfm;
}

/**
 * tipc_aead_init - Initiate TIPC AEAD
 * @aead: returned new TIPC AEAD key handle pointer
 * @ukey: pointer to user key data
 * @mode: the key mode
 *
 * Allocate a (list of) new cipher transformation (TFM) with the specific user
 * key data if valid. The number of TFMs to allocate can be set beforehand via
 * the sysctl "net/tipc/max_tfms".
 * All the other AEAD data are initialized as well.
 *
 * Return: 0 if the initiation is successful, otherwise: < 0
 */
static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
			  u8 mode)
{
	struct tipc_tfm *tfm_entry, *head;
	struct crypto_aead *tfm;
	struct tipc_aead *tmp;
	int keylen, err, cpu;
	int tfm_cnt = 0;

	if (unlikely(*aead))
		return -EEXIST;

	/* Allocate a new AEAD */
	tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
	if (unlikely(!tmp))
		return -ENOMEM;

	/* The key consists of two parts: [AES-KEY][SALT] */
	keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;

	/* Allocate per-cpu TFM entry pointer */
	tmp->tfm_entry = alloc_percpu(struct tipc_tfm *);
	if (!tmp->tfm_entry) {
		kfree_sensitive(tmp);
		return -ENOMEM;
	}

	/* Make a list of TFMs with the user key data */
	do {
		tfm = crypto_alloc_aead(ukey->alg_name, 0, 0);
		if (IS_ERR(tfm)) {
			err = PTR_ERR(tfm);
			break;
		}

		if (unlikely(!tfm_cnt &&
			     crypto_aead_ivsize(tfm) != TIPC_AES_GCM_IV_SIZE)) {
			crypto_free_aead(tfm);
			err = -ENOTSUPP;
			break;
		}

		err = crypto_aead_setauthsize(tfm, TIPC_AES_GCM_TAG_SIZE);
		err |= crypto_aead_setkey(tfm, ukey->key, keylen);
		if (unlikely(err)) {
			crypto_free_aead(tfm);
			break;
		}

		tfm_entry = kmalloc(sizeof(*tfm_entry), GFP_KERNEL);
		if (unlikely(!tfm_entry)) {
			crypto_free_aead(tfm);
			err = -ENOMEM;
			break;
		}
		INIT_LIST_HEAD(&tfm_entry->list);
		tfm_entry->tfm = tfm;

		/* First entry? */
		if (!tfm_cnt) {
			head = tfm_entry;
			for_each_possible_cpu(cpu) {
				*per_cpu_ptr(tmp->tfm_entry, cpu) = head;
			}
		} else {
			list_add_tail(&tfm_entry->list, &head->list);
		}

	} while (++tfm_cnt < sysctl_tipc_max_tfms);

	/* Not any TFM is allocated? */
	if (!tfm_cnt) {
		free_percpu(tmp->tfm_entry);
		kfree_sensitive(tmp);
		return err;
	}

	/* Form a hex string of some last bytes as the key's hint */
	bin2hex(tmp->hint, ukey->key + keylen - TIPC_AEAD_HINT_LEN,
		TIPC_AEAD_HINT_LEN);

	/* Initialize the other data */
	tmp->mode = mode;
	tmp->cloned = NULL;
	tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
	tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL);
	if (!tmp->key) {
		tipc_aead_free(&tmp->rcu);
		return -ENOMEM;
	}
	memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
	atomic_set(&tmp->users, 0);
	atomic64_set(&tmp->seqno, 0);
	refcount_set(&tmp->refcnt, 1);

	*aead = tmp;
	return 0;
}

/**
 * tipc_aead_clone - Clone a TIPC AEAD key
 * @dst: dest key for the cloning
 * @src: source key to clone from
 *
 * Make a "copy" of the source AEAD key data to the dest, the TFMs list is
 * common for the keys.
 * A reference to the source is held in the "cloned" pointer for later
 * freeing purposes.
 *
 * Note: this must be done in cluster-key mode only!
 * Return: 0 in case of success, otherwise < 0
 */
static int tipc_aead_clone(struct tipc_aead **dst, struct tipc_aead *src)
{
	struct tipc_aead *aead;
	int cpu;

	if (!src)
		return -ENOKEY;

	if (src->mode != CLUSTER_KEY)
		return -EINVAL;

	if (unlikely(*dst))
		return -EEXIST;

	aead = kzalloc(sizeof(*aead), GFP_ATOMIC);
	if (unlikely(!aead))
		return -ENOMEM;

	aead->tfm_entry = alloc_percpu_gfp(struct tipc_tfm *, GFP_ATOMIC);
	if (unlikely(!aead->tfm_entry)) {
		kfree_sensitive(aead);
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		*per_cpu_ptr(aead->tfm_entry, cpu) =
				*per_cpu_ptr(src->tfm_entry, cpu);
	}

	memcpy(aead->hint, src->hint, sizeof(src->hint));
	aead->mode = src->mode;
	aead->salt = src->salt;
	aead->authsize = src->authsize;
	atomic_set(&aead->users, 0);
	atomic64_set(&aead->seqno, 0);
	refcount_set(&aead->refcnt, 1);

	WARN_ON(!refcount_inc_not_zero(&src->refcnt));
	aead->cloned = src;

	*dst = aead;
	return 0;
}

/**
 * tipc_aead_mem_alloc - Allocate memory for AEAD request operations
 * @tfm: cipher handle to be registered with the request
 * @crypto_ctx_size: size of crypto context for callback
 * @iv: returned pointer to IV data
 * @req: returned pointer to AEAD request data
 * @sg: returned pointer to SG lists
 * @nsg: number of SG lists to be allocated
 *
 * Allocate memory to store the crypto context data, AEAD request, IV and SG
 * lists, the memory layout is as follows:
 * crypto_ctx || iv || aead_req || sg[]
 *
 * Return: the pointer to the memory areas in case of success, otherwise NULL
 */
static void *tipc_aead_mem_alloc(struct crypto_aead *tfm,
				 unsigned int crypto_ctx_size,
				 u8 **iv, struct aead_request **req,
				 struct scatterlist **sg, int nsg)
{
	unsigned int iv_size, req_size;
	unsigned int len;
	u8 *mem;

	iv_size = crypto_aead_ivsize(tfm);
	req_size = sizeof(**req) + crypto_aead_reqsize(tfm);

	len = crypto_ctx_size;
	len += iv_size;
	len += crypto_aead_alignmask(tfm) & ~(crypto_tfm_ctx_alignment() - 1);
	len = ALIGN(len, crypto_tfm_ctx_alignment());
	len += req_size;
	len = ALIGN(len, __alignof__(struct scatterlist));
	len += nsg * sizeof(**sg);

	mem = kmalloc(len, GFP_ATOMIC);
	if (!mem)
		return NULL;

	*iv = (u8 *)PTR_ALIGN(mem + crypto_ctx_size,
			      crypto_aead_alignmask(tfm) + 1);
	*req = (struct aead_request *)PTR_ALIGN(*iv + iv_size,
						crypto_tfm_ctx_alignment());
	*sg = (struct scatterlist *)PTR_ALIGN((u8 *)*req + req_size,
					      __alignof__(struct scatterlist));

	return (void *)mem;
}
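
/* Editorial sketch: how callers use the combined allocation above. One
 * kmalloc() yields [crypto_ctx][IV][aead_request][sg array], so a single
 * kfree(ctx) in the completion path releases everything at once (cf.
 * tipc_aead_encrypt()/tipc_aead_decrypt() below).
 */
#if 0
	struct aead_request *req;
	struct scatterlist *sg;
	void *ctx;
	u8 *iv;

	ctx = tipc_aead_mem_alloc(tfm, sizeof(struct tipc_crypto_tx_ctx),
				  &iv, &req, &sg, nsg);
	if (unlikely(!ctx))
		return -ENOMEM;
	/* ... set up iv/req/sg and run the cipher ... */
	kfree(ctx);	/* frees the IV, request and SG lists as well */
#endif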

/**
 * tipc_aead_encrypt - Encrypt a message
 * @aead: TIPC AEAD key for the message encryption
 * @skb: the input/output skb
 * @b: TIPC bearer where the message will be delivered after the encryption
 * @dst: the destination media address
 * @__dnode: TIPC dest node if "known"
 *
 * Return:
 * * 0                   : if the encryption has completed
 * * -EINPROGRESS/-EBUSY : if a callback will be performed
 * * < 0                 : the encryption has failed
 */
static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
			     struct tipc_bearer *b,
			     struct tipc_media_addr *dst,
			     struct tipc_node *__dnode)
{
	struct crypto_aead *tfm = tipc_aead_tfm_next(aead);
	struct tipc_crypto_tx_ctx *tx_ctx;
	struct aead_request *req;
	struct sk_buff *trailer;
	struct scatterlist *sg;
	struct tipc_ehdr *ehdr;
	int ehsz, len, tailen, nsg, rc;
	void *ctx;
	u32 salt;
	u8 *iv;

	/* Make sure message len at least 4-byte aligned */
	len = ALIGN(skb->len, 4);
	tailen = len - skb->len + aead->authsize;

	/* Expand skb tail for the authentication tag:
	 * For simplicity, we have made sure the skb gets enough tailroom for
	 * the authentication tag at skb allocation time. Even a nonlinear
	 * skb without a frag_list is still fine!
	 * Otherwise, we must cow it into a writable buffer with the tailroom.
	 */
	SKB_LINEAR_ASSERT(skb);
	if (tailen > skb_tailroom(skb)) {
		pr_debug("TX(): skb tailroom is not enough: %d, requires: %d\n",
			 skb_tailroom(skb), tailen);
	}

	nsg = skb_cow_data(skb, tailen, &trailer);
	if (unlikely(nsg < 0)) {
		pr_err("TX: skb_cow_data() returned %d\n", nsg);
		return nsg;
	}

	pskb_put(skb, trailer, tailen);

	/* Allocate memory for the AEAD operation */
	ctx = tipc_aead_mem_alloc(tfm, sizeof(*tx_ctx), &iv, &req, &sg, nsg);
	if (unlikely(!ctx))
		return -ENOMEM;
	TIPC_SKB_CB(skb)->crypto_ctx = ctx;

	/* Map skb to the sg lists */
	sg_init_table(sg, nsg);
	rc = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(rc < 0)) {
		pr_err("TX: skb_to_sgvec() returned %d, nsg %d!\n", rc, nsg);
		goto exit;
	}

	/* Prepare IV: [SALT (4 octets)][SEQNO (8 octets)]
	 * In case we're in cluster-key mode, SALT is varied by xor-ing with
	 * the source address (or w0 of id), otherwise with the dest address
	 * if dest is known.
	 */
	ehdr = (struct tipc_ehdr *)skb->data;
	salt = aead->salt;
	if (aead->mode == CLUSTER_KEY)
		salt ^= __be32_to_cpu(ehdr->addr);
	else if (__dnode)
		salt ^= tipc_node_get_addr(__dnode);
	memcpy(iv, &salt, 4);
	memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);

	/* Prepare request */
	ehsz = tipc_ehdr_size(ehdr);
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ehsz);
	aead_request_set_crypt(req, sg, sg, len - ehsz, iv);

	/* Set callback function & data */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tipc_aead_encrypt_done, skb);
	tx_ctx = (struct tipc_crypto_tx_ctx *)ctx;
	tx_ctx->aead = aead;
	tx_ctx->bearer = b;
	memcpy(&tx_ctx->dst, dst, sizeof(*dst));

	/* Hold bearer */
	if (unlikely(!tipc_bearer_hold(b))) {
		rc = -ENODEV;
		goto exit;
	}

	/* Get net to avoid freed tipc_crypto when delete namespace */
	if (!maybe_get_net(aead->crypto->net)) {
		tipc_bearer_put(b);
		rc = -ENODEV;
		goto exit;
	}

	/* Now, do encrypt */
	rc = crypto_aead_encrypt(req);
	if (rc == -EINPROGRESS || rc == -EBUSY)
		return rc;

	tipc_bearer_put(b);
	put_net(aead->crypto->net);

exit:
	kfree(ctx);
	TIPC_SKB_CB(skb)->crypto_ctx = NULL;
	return rc;
}
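
/* Editorial sketch of the IV derivation used above: the 12-byte AES-GCM
 * nonce is [SALT ^ address (4 octets)][SEQNO (8 octets)], so every
 * (key, node, seqno) triple maps to a unique nonce. The demo_*() name is
 * hypothetical.
 */
#if 0
static void demo_build_iv(u8 iv[TIPC_AES_GCM_IV_SIZE], u32 salt, __be64 seqno)
{
	memcpy(iv, &salt, 4);		/* salt already XOR-ed with the addr */
	memcpy(iv + 4, &seqno, 8);	/* big-endian sequence number */
}
#endif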

static void tipc_aead_encrypt_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct tipc_crypto_tx_ctx *tx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
	struct tipc_bearer *b = tx_ctx->bearer;
	struct tipc_aead *aead = tx_ctx->aead;
	struct tipc_crypto *tx = aead->crypto;
	struct net *net = tx->net;

	switch (err) {
	case 0:
		this_cpu_inc(tx->stats->stat[STAT_ASYNC_OK]);
		rcu_read_lock();
		if (likely(test_bit(0, &b->up)))
			b->media->send_msg(net, skb, b, &tx_ctx->dst);
		else
			kfree_skb(skb);
		rcu_read_unlock();
		break;
	case -EINPROGRESS:
		return;
	default:
		this_cpu_inc(tx->stats->stat[STAT_ASYNC_NOK]);
		kfree_skb(skb);
		break;
	}

	kfree(tx_ctx);
	tipc_bearer_put(b);
	tipc_aead_put(aead);
	put_net(net);
}

/**
 * tipc_aead_decrypt - Decrypt an encrypted message
 * @net: struct net
 * @aead: TIPC AEAD for the message decryption
 * @skb: the input/output skb
 * @b: TIPC bearer where the message has been received
 *
 * Return:
 * * 0                   : if the decryption has completed
 * * -EINPROGRESS/-EBUSY : if a callback will be performed
 * * < 0                 : the decryption has failed
 */
static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
			     struct sk_buff *skb, struct tipc_bearer *b)
{
	struct tipc_crypto_rx_ctx *rx_ctx;
	struct aead_request *req;
	struct crypto_aead *tfm;
	struct sk_buff *unused;
	struct scatterlist *sg;
	struct tipc_ehdr *ehdr;
	int ehsz, nsg, rc;
	void *ctx;
	u32 salt;
	u8 *iv;

	if (unlikely(!aead))
		return -ENOKEY;

	nsg = skb_cow_data(skb, 0, &unused);
	if (unlikely(nsg < 0)) {
		pr_err("RX: skb_cow_data() returned %d\n", nsg);
		return nsg;
	}

	/* Allocate memory for the AEAD operation */
	tfm = tipc_aead_tfm_next(aead);
	ctx = tipc_aead_mem_alloc(tfm, sizeof(*rx_ctx), &iv, &req, &sg, nsg);
	if (unlikely(!ctx))
		return -ENOMEM;
	TIPC_SKB_CB(skb)->crypto_ctx = ctx;

	/* Map skb to the sg lists */
	sg_init_table(sg, nsg);
	rc = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(rc < 0)) {
		pr_err("RX: skb_to_sgvec() returned %d, nsg %d\n", rc, nsg);
		goto exit;
	}

	/* Reconstruct IV: */
	ehdr = (struct tipc_ehdr *)skb->data;
	salt = aead->salt;
	if (aead->mode == CLUSTER_KEY)
		salt ^= __be32_to_cpu(ehdr->addr);
	else if (ehdr->destined)
		salt ^= tipc_own_addr(net);
	memcpy(iv, &salt, 4);
	memcpy(iv + 4, (u8 *)&ehdr->seqno, 8);

	/* Prepare request */
	ehsz = tipc_ehdr_size(ehdr);
	aead_request_set_tfm(req, tfm);
	aead_request_set_ad(req, ehsz);
	aead_request_set_crypt(req, sg, sg, skb->len - ehsz, iv);

	/* Set callback function & data */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tipc_aead_decrypt_done, skb);
	rx_ctx = (struct tipc_crypto_rx_ctx *)ctx;
	rx_ctx->aead = aead;
	rx_ctx->bearer = b;

	/* Hold bearer */
	if (unlikely(!tipc_bearer_hold(b))) {
		rc = -ENODEV;
		goto exit;
	}

	/* Now, do decrypt */
	rc = crypto_aead_decrypt(req);
	if (rc == -EINPROGRESS || rc == -EBUSY)
		return rc;

	tipc_bearer_put(b);

exit:
	kfree(ctx);
	TIPC_SKB_CB(skb)->crypto_ctx = NULL;
	return rc;
}

static void tipc_aead_decrypt_done(void *data, int err)
{
	struct sk_buff *skb = data;
	struct tipc_crypto_rx_ctx *rx_ctx = TIPC_SKB_CB(skb)->crypto_ctx;
	struct tipc_bearer *b = rx_ctx->bearer;
	struct tipc_aead *aead = rx_ctx->aead;
	struct tipc_crypto_stats __percpu *stats = aead->crypto->stats;
	struct net *net = aead->crypto->net;

	switch (err) {
	case 0:
		this_cpu_inc(stats->stat[STAT_ASYNC_OK]);
		break;
	case -EINPROGRESS:
		return;
	default:
		this_cpu_inc(stats->stat[STAT_ASYNC_NOK]);
		break;
	}

	kfree(rx_ctx);
	tipc_crypto_rcv_complete(net, aead, b, &skb, err);
	if (likely(skb)) {
		if (likely(test_bit(0, &b->up)))
			tipc_rcv(net, skb, b);
		else
			kfree_skb(skb);
	}

	tipc_bearer_put(b);
}

static inline int tipc_ehdr_size(struct tipc_ehdr *ehdr)
{
	return (ehdr->user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
}

/**
 * tipc_ehdr_validate - Validate an encryption message
 * @skb: the message buffer
 *
 * Return: "true" if this is a valid encryption message, otherwise "false"
 */
bool tipc_ehdr_validate(struct sk_buff *skb)
{
	struct tipc_ehdr *ehdr;
	int ehsz;

	if (unlikely(!pskb_may_pull(skb, EHDR_MIN_SIZE)))
		return false;

	ehdr = (struct tipc_ehdr *)skb->data;
	if (unlikely(ehdr->version != TIPC_EVERSION))
		return false;
	ehsz = tipc_ehdr_size(ehdr);
	if (unlikely(!pskb_may_pull(skb, ehsz)))
		return false;
	if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE))
		return false;

	return true;
}

/**
 * tipc_ehdr_build - Build TIPC encryption message header
 * @net: struct net
 * @aead: TX AEAD key to be used for the message encryption
 * @tx_key: key id used for the message encryption
 * @skb: input/output message skb
 * @__rx: RX crypto handle if dest is "known"
 *
 * Return: the header size if the building is successful, otherwise < 0
 */
static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
			   u8 tx_key, struct sk_buff *skb,
			   struct tipc_crypto *__rx)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_ehdr *ehdr;
	u32 user = msg_user(hdr);
	u64 seqno;
	int ehsz;

	/* Make room for encryption header */
	ehsz = (user != LINK_CONFIG) ? EHDR_SIZE : EHDR_CFG_SIZE;
	WARN_ON(skb_headroom(skb) < ehsz);
	ehdr = (struct tipc_ehdr *)skb_push(skb, ehsz);

	/* Obtain a seqno first:
	 * Use the key seqno (= cluster wise) if dest is unknown or we're in
	 * cluster key mode, otherwise it's better for a per-peer seqno!
	 */
	if (!__rx || aead->mode == CLUSTER_KEY)
		seqno = atomic64_inc_return(&aead->seqno);
	else
		seqno = atomic64_inc_return(&__rx->sndnxt);

	/* Revoke the key if seqno is wrapped around */
	if (unlikely(!seqno))
		return tipc_crypto_key_revoke(net, tx_key);

	/* Word 1-2 */
	ehdr->seqno = cpu_to_be64(seqno);

	/* Words 0, 3- */
	ehdr->version = TIPC_EVERSION;
	ehdr->user = 0;
	ehdr->keepalive = 0;
	ehdr->tx_key = tx_key;
	ehdr->destined = (__rx) ? 1 : 0;
	ehdr->rx_key_active = (__rx) ? __rx->key.active : 0;
	ehdr->rx_nokey = (__rx) ? __rx->nokey : 0;
	ehdr->master_key = aead->crypto->key_master;
	ehdr->reserved_1 = 0;
	ehdr->reserved_2 = 0;

	switch (user) {
	case LINK_CONFIG:
		ehdr->user = LINK_CONFIG;
		memcpy(ehdr->id, tipc_own_id(net), NODE_ID_LEN);
		break;
	default:
		if (user == LINK_PROTOCOL && msg_type(hdr) == STATE_MSG) {
			ehdr->user = LINK_PROTOCOL;
			ehdr->keepalive = msg_is_keepalive(hdr);
		}
		ehdr->addr = hdr->hdr[3];
		break;
	}

	return ehsz;
}

static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
					     u8 new_passive,
					     u8 new_active,
					     u8 new_pending)
{
	struct tipc_key old = c->key;
	char buf[32];

	c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) |
		      ((new_active  & KEY_MASK) << (KEY_BITS)) |
		      ((new_pending & KEY_MASK));

	pr_debug("%s: key changing %s ::%pS\n", c->name,
		 tipc_key_change_dump(old, c->key, buf),
		 __builtin_return_address(0));
}

/**
 * tipc_crypto_key_init - Initiate a new user / AEAD key
 * @c: TIPC crypto to which new key is attached
 * @ukey: the user key
 * @mode: the key mode (CLUSTER_KEY or PER_NODE_KEY)
 * @master_key: specify this is a cluster master key
 *
 * A new TIPC AEAD key will be allocated and initiated with the specified user
 * key, then attached to the TIPC crypto.
 *
 * Return: new key id in case of success, otherwise: < 0
 */
int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
			 u8 mode, bool master_key)
{
	struct tipc_aead *aead = NULL;
	int rc = 0;

	/* Initiate with the new user key */
	rc = tipc_aead_init(&aead, ukey, mode);

	/* Attach it to the crypto */
	if (likely(!rc)) {
		rc = tipc_crypto_key_attach(c, aead, 0, master_key);
		if (rc < 0)
			tipc_aead_free(&aead->rcu);
	}

	return rc;
}

/**
 * tipc_crypto_key_attach - Attach a new AEAD key to TIPC crypto
 * @c: TIPC crypto to which the new AEAD key is attached
 * @aead: the new AEAD key pointer
 * @pos: desired slot in the crypto key array, = 0 if any!
 * @master_key: specify this is a cluster master key
 *
 * Return: new key id in case of success, otherwise: -EBUSY
 */
static int tipc_crypto_key_attach(struct tipc_crypto *c,
				  struct tipc_aead *aead, u8 pos,
				  bool master_key)
{
	struct tipc_key key;
	int rc = -EBUSY;
	u8 new_key;

	spin_lock_bh(&c->lock);
	key = c->key;
	if (master_key) {
		new_key = KEY_MASTER;
		goto attach;
	}
	if (key.active && key.passive)
		goto exit;
	if (key.pending) {
		if (tipc_aead_users(c->aead[key.pending]) > 0)
			goto exit;
		/* if (pos): ok with replacing, will be aligned when needed */
		/* Replace it */
		new_key = key.pending;
	} else {
		if (pos) {
			if (key.active && pos != key_next(key.active)) {
				key.passive = pos;
				new_key = pos;
				goto attach;
			} else if (!key.active && !key.passive) {
				key.pending = pos;
				new_key = pos;
				goto attach;
			}
		}
		key.pending = key_next(key.active ?: key.passive);
		new_key = key.pending;
	}

attach:
	aead->crypto = c;
	aead->gen = (is_tx(c)) ? ++c->key_gen : c->key_gen;
	tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock);
	if (likely(c->key.keys != key.keys))
		tipc_crypto_key_set_state(c, key.passive, key.active,
					  key.pending);
	c->working = 1;
	c->nokey = 0;
	c->key_master |= master_key;
	rc = new_key;

exit:
	spin_unlock_bh(&c->lock);
	return rc;
}

void tipc_crypto_key_flush(struct tipc_crypto *c)
{
	struct tipc_crypto *tx, *rx;
	int k;

	spin_lock_bh(&c->lock);
	if (is_rx(c)) {
		/* Try to cancel pending work */
		rx = c;
		tx = tipc_net(rx->net)->crypto_tx;
		if (cancel_delayed_work(&rx->work)) {
			kfree(rx->skey);
			rx->skey = NULL;
			atomic_xchg(&rx->key_distr, 0);
			tipc_node_put(rx->node);
		}
		/* RX stopping => decrease TX key users if any */
		k = atomic_xchg(&rx->peer_rx_active, 0);
		if (k) {
			tipc_aead_users_dec(tx->aead[k], 0);
			/* Mark the point TX key users changed */
			tx->timer1 = jiffies;
		}
	}

	c->flags = 0;
	tipc_crypto_key_set_state(c, 0, 0, 0);
	for (k = KEY_MIN; k <= KEY_MAX; k++)
		tipc_crypto_key_detach(c->aead[k], &c->lock);
	atomic64_set(&c->sndnxt, 0);
	spin_unlock_bh(&c->lock);
}

/**
 * tipc_crypto_key_try_align - Align RX keys if possible
 * @rx: RX crypto handle
 * @new_pending: new pending slot if aligned (= TX key from peer)
 *
 * Peer has used an unknown key slot, this only happens when peer has left and
 * rejoined, or we are a newcomer.
 * That means, there must be no active key but a pending key at an unaligned
 * slot. If so, we try to move the pending key to the new slot.
 * Note: A potential passive key can exist, it will be shifted correspondingly!
 *
 * Return: "true" if key is successfully aligned, otherwise "false"
 */
static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending)
{
	struct tipc_aead *tmp1, *tmp2 = NULL;
	struct tipc_key key;
	bool aligned = false;
	u8 new_passive = 0;
	int x;

	spin_lock(&rx->lock);
	key = rx->key;
	if (key.pending == new_pending) {
		aligned = true;
		goto exit;
	}
	if (key.active)
		goto exit;
	if (!key.pending)
		goto exit;
	if (tipc_aead_users(rx->aead[key.pending]) > 0)
		goto exit;

	/* Try to "isolate" this pending key first */
	tmp1 = tipc_aead_rcu_ptr(rx->aead[key.pending], &rx->lock);
	if (!refcount_dec_if_one(&tmp1->refcnt))
		goto exit;
	rcu_assign_pointer(rx->aead[key.pending], NULL);

	/* Move passive key if any */
	if (key.passive) {
		tmp2 = rcu_replace_pointer(rx->aead[key.passive], tmp2,
					   lockdep_is_held(&rx->lock));
		x = (key.passive - key.pending + new_pending) % KEY_MAX;
		new_passive = (x <= 0) ? x + KEY_MAX : x;
	}

	/* Re-allocate the key(s) */
	tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
	rcu_assign_pointer(rx->aead[new_pending], tmp1);
	if (new_passive)
		rcu_assign_pointer(rx->aead[new_passive], tmp2);
	refcount_set(&tmp1->refcnt, 1);
	aligned = true;
	pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending,
			    new_pending);

exit:
	spin_unlock(&rx->lock);
	return aligned;
}
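
/* Editorial worked example for the passive-slot shift above: with
 * key.pending == 2, key.passive == 3 and new_pending == 1,
 *   x = (3 - 2 + 1) % 3 == 2  =>  new_passive == 2,
 * and with key.pending == 3, key.passive == 1 and new_pending == 2,
 *   x = (1 - 3 + 2) % 3 == 0  =>  new_passive == 0 + KEY_MAX == 3,
 * i.e. the passive key keeps its relative distance to the pending key.
 */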

/**
 * tipc_crypto_key_pick_tx - Pick one TX key for message decryption
 * @tx: TX crypto handle
 * @rx: RX crypto handle (can be NULL)
 * @skb: the message skb which will be decrypted later
 * @tx_key: peer TX key id
 *
 * This function looks up the existing TX keys and picks one which is suitable
 * for the message decryption, that must be a cluster key and not used before
 * on the same message (i.e. recursive).
 *
 * Return: the TX AEAD key handle in case of success, otherwise NULL
 */
static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
						 struct tipc_crypto *rx,
						 struct sk_buff *skb,
						 u8 tx_key)
{
	struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(skb);
	struct tipc_aead *aead = NULL;
	struct tipc_key key = tx->key;
	u8 k, i = 0;

	/* Initialize data if not yet */
	if (!skb_cb->tx_clone_deferred) {
		skb_cb->tx_clone_deferred = 1;
		memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
	}

	skb_cb->tx_clone_ctx.rx = rx;
	if (++skb_cb->tx_clone_ctx.recurs > 2)
		return NULL;

	/* Pick one TX key */
	spin_lock(&tx->lock);
	if (tx_key == KEY_MASTER) {
		aead = tipc_aead_rcu_ptr(tx->aead[KEY_MASTER], &tx->lock);
		goto done;
	}
	do {
		k = (i == 0) ? key.pending :
			((i == 1) ? key.active : key.passive);
		if (!k)
			continue;
		aead = tipc_aead_rcu_ptr(tx->aead[k], &tx->lock);
		if (!aead)
			continue;
		if (aead->mode != CLUSTER_KEY ||
		    aead == skb_cb->tx_clone_ctx.last) {
			aead = NULL;
			continue;
		}
		/* Ok, found one cluster key */
		skb_cb->tx_clone_ctx.last = aead;
		WARN_ON(skb->next);
		skb->next = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!skb->next))
			pr_warn("Failed to clone skb for next round if any\n");
		break;
	} while (++i < 3);

done:
	if (likely(aead))
		WARN_ON(!refcount_inc_not_zero(&aead->refcnt));
	spin_unlock(&tx->lock);

	return aead;
}

/**
 * tipc_crypto_key_synch - Synch own key data according to peer key status
 * @rx: RX crypto handle
 * @skb: TIPCv2 message buffer (incl. the ehdr from peer)
 *
 * This function updates the peer node related data as the peer RX active key
 * has changed, so the users of the corresponding TX keys on this node are
 * increased and decreased accordingly.
 *
 * It also considers if peer has no key, then we need to let our own master
 * key (if any) take over, i.e. start the grace period, and also trigger the
 * key distributing process.
 *
 * The "per-peer" sndnxt is also reset when the peer key has switched.
 */
static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb)
{
	struct tipc_ehdr *ehdr = (struct tipc_ehdr *)skb_network_header(skb);
	struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
	struct tipc_msg *hdr = buf_msg(skb);
	u32 self = tipc_own_addr(rx->net);
	u8 cur, new;
	unsigned long delay;

	/* Update RX 'key_master' flag according to peer, also mark "legacy" if
	 * a peer has no master key.
	 */
	rx->key_master = ehdr->master_key;
	if (!rx->key_master)
		tx->legacy_user = 1;

	/* For later cases, apply only if message is destined to this node */
	if (!ehdr->destined || msg_short(hdr) || msg_destnode(hdr) != self)
		return;

	/* Case 1: Peer has no keys, let's make master key take over */
	if (ehdr->rx_nokey) {
		/* Set or extend grace period */
		tx->timer2 = jiffies;
		/* Schedule key distributing for the peer if not yet */
		if (tx->key.keys &&
		    !atomic_cmpxchg(&rx->key_distr, 0, KEY_DISTR_SCHED)) {
			get_random_bytes(&delay, 2);
			delay %= 5;
			delay = msecs_to_jiffies(500 * ++delay);
			if (queue_delayed_work(tx->wq, &rx->work, delay))
				tipc_node_get(rx->node);
		}
	} else {
		/* Cancel a pending key distributing if any */
		atomic_xchg(&rx->key_distr, 0);
	}

	/* Case 2: Peer RX active key has changed, let's update own TX users */
	cur = atomic_read(&rx->peer_rx_active);
	new = ehdr->rx_key_active;
	if (tx->key.keys &&
	    cur != new &&
	    atomic_cmpxchg(&rx->peer_rx_active, cur, new) == cur) {
		if (new)
			tipc_aead_users_inc(tx->aead[new], INT_MAX);
		if (cur)
			tipc_aead_users_dec(tx->aead[cur], 0);
		atomic64_set(&rx->sndnxt, 0);
		/* Mark the point TX key users changed */
		tx->timer1 = jiffies;

		pr_debug("%s: key users changed %d-- %d++, peer %s\n",
			 tx->name, cur, new, rx->name);
	}
}
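
/* Editorial note on the distribution delay above: the randomized value
 * modulo 5 gives 0..4, the pre-increment makes it 1..5, so the key
 * distributing work runs after a random 0.5 .. 2.5 s (in 500 ms steps),
 * spreading the load when many peers report "no key" at once.
 */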
  1283. static int tipc_crypto_key_revoke(struct net *net, u8 tx_key)
  1284. {
  1285. struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
  1286. struct tipc_key key;
  1287. spin_lock_bh(&tx->lock);
  1288. key = tx->key;
  1289. WARN_ON(!key.active || tx_key != key.active);
  1290. /* Free the active key */
  1291. tipc_crypto_key_set_state(tx, key.passive, 0, key.pending);
  1292. tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
  1293. spin_unlock_bh(&tx->lock);
  1294. pr_warn("%s: key is revoked\n", tx->name);
  1295. return -EKEYREVOKED;
  1296. }
  1297. int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
  1298. struct tipc_node *node)
  1299. {
  1300. struct tipc_crypto *c;
  1301. if (*crypto)
  1302. return -EEXIST;
  1303. /* Allocate crypto */
  1304. c = kzalloc(sizeof(*c), GFP_ATOMIC);
  1305. if (!c)
  1306. return -ENOMEM;
	/* Allocate workqueue on TX */
	if (!node) {
		c->wq = alloc_ordered_workqueue("tipc_crypto", 0);
		if (!c->wq) {
			kfree(c);
			return -ENOMEM;
		}
	}

	/* Allocate statistic structure */
	c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
	if (!c->stats) {
		if (c->wq)
			destroy_workqueue(c->wq);
		kfree_sensitive(c);
		return -ENOMEM;
	}

	c->flags = 0;
	c->net = net;
	c->node = node;
	get_random_bytes(&c->key_gen, 2);
	tipc_crypto_key_set_state(c, 0, 0, 0);
	atomic_set(&c->key_distr, 0);
	atomic_set(&c->peer_rx_active, 0);
	atomic64_set(&c->sndnxt, 0);
	c->timer1 = jiffies;
	c->timer2 = jiffies;
	c->rekeying_intv = TIPC_REKEYING_INTV_DEF;
	spin_lock_init(&c->lock);
	scnprintf(c->name, 48, "%s(%s)", (is_rx(c)) ? "RX" : "TX",
		  (is_rx(c)) ? tipc_node_get_id_str(c->node) :
			       tipc_own_id_string(c->net));

	if (is_rx(c))
		INIT_DELAYED_WORK(&c->work, tipc_crypto_work_rx);
	else
		INIT_DELAYED_WORK(&c->work, tipc_crypto_work_tx);

	*crypto = c;
	return 0;
}

void tipc_crypto_stop(struct tipc_crypto **crypto)
{
	struct tipc_crypto *c = *crypto;
	u8 k;

	if (!c)
		return;

	/* Flush any queued works & destroy wq */
	if (is_tx(c)) {
		c->rekeying_intv = 0;
		cancel_delayed_work_sync(&c->work);
		destroy_workqueue(c->wq);
	}

	/* Release AEAD keys */
	rcu_read_lock();
	for (k = KEY_MIN; k <= KEY_MAX; k++)
		tipc_aead_put(rcu_dereference(c->aead[k]));
	rcu_read_unlock();
	pr_debug("%s: has been stopped\n", c->name);

	/* Free this crypto statistics */
	free_percpu(c->stats);

	*crypto = NULL;
	kfree_sensitive(c);
}

void tipc_crypto_timeout(struct tipc_crypto *rx)
{
	struct tipc_net *tn = tipc_net(rx->net);
	struct tipc_crypto *tx = tn->crypto_tx;
	struct tipc_key key;
	int cmd;
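
	/* The checks below form a small state machine over the key slots;
	 * the labels s1..s5 are its step exits: s1 ends the TX pending ->
	 * active promotion, s2..s4 walk the RX pending/active/passive
	 * transitions, and s5 releases the RX lock.
	 */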
	/* TX pending: taking all users & stable -> active */
	spin_lock(&tx->lock);
	key = tx->key;
	if (key.active && tipc_aead_users(tx->aead[key.active]) > 0)
		goto s1;
	if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0)
		goto s1;
	if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_TIME))
		goto s1;

	tipc_crypto_key_set_state(tx, key.passive, key.pending, 0);
	if (key.active)
		tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
	this_cpu_inc(tx->stats->stat[STAT_SWITCHES]);
	pr_info("%s: key[%d] is activated\n", tx->name, key.pending);

s1:
	spin_unlock(&tx->lock);

	/* RX pending: having user -> active */
	spin_lock(&rx->lock);
	key = rx->key;
	if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0)
		goto s2;

	if (key.active)
		key.passive = key.active;
	key.active = key.pending;
	rx->timer2 = jiffies;
	tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
	this_cpu_inc(rx->stats->stat[STAT_SWITCHES]);
	pr_info("%s: key[%d] is activated\n", rx->name, key.pending);
	goto s5;

s2:
	/* RX pending: not working -> remove */
	if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -10)
		goto s3;

	tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
	tipc_crypto_key_detach(rx->aead[key.pending], &rx->lock);
	pr_debug("%s: key[%d] is removed\n", rx->name, key.pending);
	goto s5;

s3:
	/* RX active: timed out or no user -> pending */
	if (!key.active)
		goto s4;
	if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM) &&
	    tipc_aead_users(rx->aead[key.active]) > 0)
		goto s4;

	if (key.pending)
		key.passive = key.active;
	else
		key.pending = key.active;
	rx->timer2 = jiffies;
	tipc_crypto_key_set_state(rx, key.passive, 0, key.pending);
	tipc_aead_users_set(rx->aead[key.pending], 0);
	pr_debug("%s: key[%d] is deactivated\n", rx->name, key.active);
	goto s5;

s4:
	/* RX passive: outdated or not working -> free */
	if (!key.passive)
		goto s5;
	if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM) &&
	    tipc_aead_users(rx->aead[key.passive]) > -10)
		goto s5;

	tipc_crypto_key_set_state(rx, 0, key.active, key.pending);
	tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock);
	pr_debug("%s: key[%d] is freed\n", rx->name, key.passive);

s5:
	spin_unlock(&rx->lock);

	/* Relax the 'legacy_user' flag here; it will be set again on
	 * reception if a peer really has no master key. For safety, only do
	 * so when we are not in the grace period!
	 */
	if (time_after(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD))
		tx->legacy_user = 0;

	/* Limit max_tfms & do debug commands if needed */
	if (likely(sysctl_tipc_max_tfms <= TIPC_MAX_TFMS_LIM))
		return;

	cmd = sysctl_tipc_max_tfms;
	sysctl_tipc_max_tfms = TIPC_MAX_TFMS_DEF;
	tipc_crypto_do_cmd(rx->net, cmd);
}
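
/* Helper for tipc_crypto_xmit(): sends an extra clone of a message,
 * encrypted with a different key (probing or gracing), in addition to the
 * original transmission (see SKB_PROBING/SKB_GRACING in the caller).
 */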
static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
					 struct tipc_bearer *b,
					 struct tipc_media_addr *dst,
					 struct tipc_node *__dnode, u8 type)
{
	struct sk_buff *skb;

	skb = skb_clone(_skb, GFP_ATOMIC);
	if (skb) {
		TIPC_SKB_CB(skb)->xmit_type = type;
		tipc_crypto_xmit(net, &skb, b, dst, __dnode);
		if (skb)
			b->media->send_msg(net, skb, b, dst);
	}
}

/**
 * tipc_crypto_xmit - Build & encrypt TIPC message for xmit
 * @net: struct net
 * @skb: input/output message skb pointer
 * @b: bearer used for xmit later
 * @dst: destination media address
 * @__dnode: destination node for reference if any
 *
 * First, build an encryption message header on top of the message, then
 * encrypt the original TIPC message by using the pending, master or active
 * key with this preference order.
 * If the encryption is successful, the encrypted skb is returned directly or
 * via the callback.
 * Otherwise, the skb is freed!
 *
 * Return:
 * * 0                   : the encryption has succeeded (or no encryption)
 * * -EINPROGRESS/-EBUSY : the encryption is ongoing, a callback will be made
 * * -ENOKEY             : the encryption has failed due to no key
 * * -EKEYREVOKED        : the encryption has failed due to key revoked
 * * -ENOMEM             : the encryption has failed due to no memory
 * * < 0                 : the encryption has failed due to other reasons
 */
int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
		     struct tipc_bearer *b, struct tipc_media_addr *dst,
		     struct tipc_node *__dnode)
{
	struct tipc_crypto *__rx = tipc_node_crypto_rx(__dnode);
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_crypto_stats __percpu *stats = tx->stats;
	struct tipc_msg *hdr = buf_msg(*skb);
	struct tipc_key key = tx->key;
	struct tipc_aead *aead = NULL;
	u32 user = msg_user(hdr);
	u32 type = msg_type(hdr);
	int rc = -ENOKEY;
	u8 tx_key = 0;

	/* No encryption? */
	if (!tx->working)
		return 0;

	/* Pending key if peer has active on it or probing time */
	if (unlikely(key.pending)) {
		tx_key = key.pending;
		if (!tx->key_master && !key.active)
			goto encrypt;
		if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key)
			goto encrypt;
		if (TIPC_SKB_CB(*skb)->xmit_type == SKB_PROBING) {
			pr_debug("%s: probing for key[%d]\n", tx->name,
				 key.pending);
			goto encrypt;
		}
		if (user == LINK_CONFIG || user == LINK_PROTOCOL)
			tipc_crypto_clone_msg(net, *skb, b, dst, __dnode,
					      SKB_PROBING);
	}
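
	/* When only a probing clone of a link message was sent above, the
	 * original message falls through and is typically encrypted below
	 * with the master or active key.
	 */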

	/* Master key if this is a *vital* message or in grace period */
	if (tx->key_master) {
		tx_key = KEY_MASTER;
		if (!key.active)
			goto encrypt;
		if (TIPC_SKB_CB(*skb)->xmit_type == SKB_GRACING) {
			pr_debug("%s: gracing for msg (%d %d)\n", tx->name,
				 user, type);
			goto encrypt;
		}
		if (user == LINK_CONFIG ||
		    (user == LINK_PROTOCOL && type == RESET_MSG) ||
		    (user == MSG_CRYPTO && type == KEY_DISTR_MSG) ||
		    time_before(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) {
			if (__rx && __rx->key_master &&
			    !atomic_read(&__rx->peer_rx_active))
				goto encrypt;
			if (!__rx) {
				if (likely(!tx->legacy_user))
					goto encrypt;
				tipc_crypto_clone_msg(net, *skb, b, dst,
						      __dnode, SKB_GRACING);
			}
		}
	}

	/* Else, use the active key if any */
	if (likely(key.active)) {
		tx_key = key.active;
		goto encrypt;
	}

	goto exit;

encrypt:
	aead = tipc_aead_get(tx->aead[tx_key]);
	if (unlikely(!aead))
		goto exit;
	rc = tipc_ehdr_build(net, aead, tx_key, *skb, __rx);
	if (likely(rc > 0))
		rc = tipc_aead_encrypt(aead, *skb, b, dst, __dnode);

exit:
	switch (rc) {
	case 0:
		this_cpu_inc(stats->stat[STAT_OK]);
		break;
	case -EINPROGRESS:
	case -EBUSY:
		this_cpu_inc(stats->stat[STAT_ASYNC]);
		*skb = NULL;
		return rc;
	default:
		this_cpu_inc(stats->stat[STAT_NOK]);
		if (rc == -ENOKEY)
			this_cpu_inc(stats->stat[STAT_NOKEYS]);
		else if (rc == -EKEYREVOKED)
			this_cpu_inc(stats->stat[STAT_BADKEYS]);
		kfree_skb(*skb);
		*skb = NULL;
		break;
	}

	tipc_aead_put(aead);
	return rc;
}

/**
 * tipc_crypto_rcv - Decrypt an encrypted TIPC message from peer
 * @net: struct net
 * @rx: RX crypto handle
 * @skb: input/output message skb pointer
 * @b: bearer where the message has been received
 *
 * If the decryption is successful, the decrypted skb is returned directly or
 * via the callback; the encryption header and auth tag will be trimmed out
 * before forwarding to tipc_rcv() via the tipc_crypto_rcv_complete().
 * Otherwise, the skb will be freed!
 * Note: RX key(s) can be re-aligned, or if no suitable key is found, TX
 * cluster key(s) can be used for decryption (recursively).
 *
 * Return:
 * * 0                   : the decryption has successfully completed
 * * -EINPROGRESS/-EBUSY : the decryption is ongoing, a callback will be made
 * * -ENOKEY             : the decryption has failed due to no key
 * * -EBADMSG            : the decryption has failed due to bad message
 * * -ENOMEM             : the decryption has failed due to no memory
 * * < 0                 : the decryption has failed due to other reasons
 */
int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
		    struct sk_buff **skb, struct tipc_bearer *b)
{
	struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
	struct tipc_crypto_stats __percpu *stats;
	struct tipc_aead *aead = NULL;
	struct tipc_key key;
	int rc = -ENOKEY;
	u8 tx_key, n;

	tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key;

	/* New peer?
	 * Let's try with TX key (i.e. cluster mode) & verify the skb first!
	 */
	if (unlikely(!rx || tx_key == KEY_MASTER))
		goto pick_tx;

	/* Pick RX key according to TX key if any */
	key = rx->key;
	if (tx_key == key.active || tx_key == key.pending ||
	    tx_key == key.passive)
		goto decrypt;

	/* Unknown key, let's try to align RX key(s) */
	if (tipc_crypto_key_try_align(rx, tx_key))
		goto decrypt;

pick_tx:
	/* No key suitable? Try to pick one from TX... */
	aead = tipc_crypto_key_pick_tx(tx, rx, *skb, tx_key);
	if (aead)
		goto decrypt;
	goto exit;

decrypt:
	rcu_read_lock();
	if (!aead)
		aead = tipc_aead_get(rx->aead[tx_key]);
	rc = tipc_aead_decrypt(net, aead, *skb, b);
	rcu_read_unlock();

exit:
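	/* Account the result on the RX instance when known, otherwise on the
	 * cluster-level TX crypto.
	 */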
	stats = ((rx) ?: tx)->stats;
	switch (rc) {
	case 0:
		this_cpu_inc(stats->stat[STAT_OK]);
		break;
	case -EINPROGRESS:
	case -EBUSY:
		this_cpu_inc(stats->stat[STAT_ASYNC]);
		*skb = NULL;
		return rc;
	default:
		this_cpu_inc(stats->stat[STAT_NOK]);
		if (rc == -ENOKEY) {
			kfree_skb(*skb);
			*skb = NULL;
			if (rx) {
				/* Mark rx->nokey only if we don't have a
				 * pending received session key, nor a newer
				 * one i.e. in the next slot.
				 */
				n = key_next(tx_key);
				rx->nokey = !(rx->skey ||
					      rcu_access_pointer(rx->aead[n]));
				pr_debug_ratelimited("%s: nokey %d, key %d/%x\n",
						     rx->name, rx->nokey,
						     tx_key, rx->key.keys);
				tipc_node_put(rx->node);
			}
			this_cpu_inc(stats->stat[STAT_NOKEYS]);
			return rc;
		} else if (rc == -EBADMSG) {
			this_cpu_inc(stats->stat[STAT_BADMSGS]);
		}
		break;
	}

	tipc_crypto_rcv_complete(net, aead, b, skb, rc);
	return rc;
}

static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
				     struct tipc_bearer *b,
				     struct sk_buff **skb, int err)
{
	struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(*skb);
	struct tipc_crypto *rx = aead->crypto;
	struct tipc_aead *tmp = NULL;
	struct tipc_ehdr *ehdr;
	struct tipc_node *n;
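
	/* If decryption was done with a TX (cluster) key, we land here with
	 * aead->crypto being the TX instance; the skb's clone context then
	 * tells us which RX instance (if any) the now-working key should be
	 * attached to.
	 */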
	/* Is this completed by TX? */
	if (unlikely(is_tx(aead->crypto))) {
		rx = skb_cb->tx_clone_ctx.rx;
		pr_debug("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n",
			 (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead,
			 (*skb)->next, skb_cb->flags);
		pr_debug("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n",
			 skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last,
			 aead->crypto->aead[1], aead->crypto->aead[2],
			 aead->crypto->aead[3]);
		if (unlikely(err)) {
			if (err == -EBADMSG && (*skb)->next)
				tipc_rcv(net, (*skb)->next, b);
			goto free_skb;
		}

		if (likely((*skb)->next)) {
			kfree_skb((*skb)->next);
			(*skb)->next = NULL;
		}

		ehdr = (struct tipc_ehdr *)(*skb)->data;
		if (!rx) {
			WARN_ON(ehdr->user != LINK_CONFIG);
			n = tipc_node_create(net, 0, ehdr->id, 0xffffu, 0,
					     true);
			rx = tipc_node_crypto_rx(n);
			if (unlikely(!rx))
				goto free_skb;
		}

		/* Ignore cloning if it was TX master key */
		if (ehdr->tx_key == KEY_MASTER)
			goto rcv;
		if (tipc_aead_clone(&tmp, aead) < 0)
			goto rcv;
		WARN_ON(!refcount_inc_not_zero(&tmp->refcnt));
		if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) {
			tipc_aead_free(&tmp->rcu);
			goto rcv;
		}
		tipc_aead_put(aead);
		aead = tmp;
	}

	if (unlikely(err)) {
		tipc_aead_users_dec((struct tipc_aead __force __rcu *)aead, INT_MIN);
		goto free_skb;
	}

	/* Set the RX key's user */
	tipc_aead_users_set((struct tipc_aead __force __rcu *)aead, 1);

	/* Mark this point, RX works */
	rx->timer1 = jiffies;

rcv:
	/* Remove ehdr & auth. tag prior to tipc_rcv() */
	ehdr = (struct tipc_ehdr *)(*skb)->data;

	/* Mark this point, RX passive still works */
	if (rx->key.passive && ehdr->tx_key == rx->key.passive)
		rx->timer2 = jiffies;

	skb_reset_network_header(*skb);
	skb_pull(*skb, tipc_ehdr_size(ehdr));
	if (pskb_trim(*skb, (*skb)->len - aead->authsize))
		goto free_skb;

	/* Validate TIPCv2 message */
	if (unlikely(!tipc_msg_validate(skb))) {
		pr_err_ratelimited("Packet dropped after decryption!\n");
		goto free_skb;
	}

	/* Ok, everything's fine, try to synch own keys according to peers' */
	tipc_crypto_key_synch(rx, *skb);

	/* Re-fetch skb cb as skb might be changed in tipc_msg_validate */
	skb_cb = TIPC_SKB_CB(*skb);

	/* Mark skb decrypted */
	skb_cb->decrypted = 1;

	/* Clear clone cxt if any */
	if (likely(!skb_cb->tx_clone_deferred))
		goto exit;

	skb_cb->tx_clone_deferred = 0;
	memset(&skb_cb->tx_clone_ctx, 0, sizeof(skb_cb->tx_clone_ctx));
	goto exit;

free_skb:
	kfree_skb(*skb);
	*skb = NULL;

exit:
	tipc_aead_put(aead);
	if (rx)
		tipc_node_put(rx->node);
}

static void tipc_crypto_do_cmd(struct net *net, int cmd)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_crypto *tx = tn->crypto_tx, *rx;
	struct list_head *p;
	unsigned int stat;
	int i, j, cpu;
	char buf[200];
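
	/* Debug hook: a value written to the tipc_max_tfms sysctl above
	 * TIPC_MAX_TFMS_LIM is treated as a command code and routed here by
	 * tipc_crypto_timeout(), instead of being used as a real TFM limit.
	 */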
	/* Currently only one command is supported */
	switch (cmd) {
	case 0xfff1:
		goto print_stats;
	default:
		return;
	}

print_stats:
	/* Print a header */
	pr_info("\n=============== TIPC Crypto Statistics ===============\n\n");

	/* Print key status */
	pr_info("Key status:\n");
	pr_info("TX(%7.7s)\n%s", tipc_own_id_string(net),
		tipc_crypto_key_dump(tx, buf));

	rcu_read_lock();
	for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
		rx = tipc_node_crypto_rx_by_list(p);
		pr_info("RX(%7.7s)\n%s", tipc_node_get_id_str(rx->node),
			tipc_crypto_key_dump(rx, buf));
	}
	rcu_read_unlock();

	/* Print crypto statistics */
	for (i = 0, j = 0; i < MAX_STATS; i++)
		j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]);
	pr_info("Counter     %s", buf);

	memset(buf, '-', 115);
	buf[115] = '\0';
	pr_info("%s\n", buf);

	j = scnprintf(buf, 200, "TX(%7.7s) ", tipc_own_id_string(net));
	for_each_possible_cpu(cpu) {
		for (i = 0; i < MAX_STATS; i++) {
			stat = per_cpu_ptr(tx->stats, cpu)->stat[i];
			j += scnprintf(buf + j, 200 - j, "|%11d ", stat);
		}
		pr_info("%s", buf);
		j = scnprintf(buf, 200, "%12s", " ");
	}

	rcu_read_lock();
	for (p = tn->node_list.next; p != &tn->node_list; p = p->next) {
		rx = tipc_node_crypto_rx_by_list(p);
		j = scnprintf(buf, 200, "RX(%7.7s) ",
			      tipc_node_get_id_str(rx->node));
		for_each_possible_cpu(cpu) {
			for (i = 0; i < MAX_STATS; i++) {
				stat = per_cpu_ptr(rx->stats, cpu)->stat[i];
				j += scnprintf(buf + j, 200 - j, "|%11d ",
					       stat);
			}
			pr_info("%s", buf);
			j = scnprintf(buf, 200, "%12s", " ");
		}
	}
	rcu_read_unlock();

	pr_info("\n======================== Done ========================\n");
}

static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf)
{
	struct tipc_key key = c->key;
	struct tipc_aead *aead;
	int k, i = 0;
	char *s;
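
	/* One line per key slot: state (PAS/ACT/PEN or "-"), then, if an
	 * AEAD is attached, its key hint, mode (c = cluster, p = per-node)
	 * and users:refcnt counters.
	 */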
	for (k = KEY_MIN; k <= KEY_MAX; k++) {
		if (k == KEY_MASTER) {
			if (is_rx(c))
				continue;
			if (time_before(jiffies,
					c->timer2 + TIPC_TX_GRACE_PERIOD))
				s = "ACT";
			else
				s = "PAS";
		} else {
			if (k == key.passive)
				s = "PAS";
			else if (k == key.active)
				s = "ACT";
			else if (k == key.pending)
				s = "PEN";
			else
				s = "-";
		}
		i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s);

		rcu_read_lock();
		aead = rcu_dereference(c->aead[k]);
		if (aead)
			i += scnprintf(buf + i, 200 - i,
				       "{\"0x...%s\", \"%s\"}/%d:%d",
				       aead->hint,
				       (aead->mode == CLUSTER_KEY) ? "c" : "p",
				       atomic_read(&aead->users),
				       refcount_read(&aead->refcnt));
		rcu_read_unlock();
		i += scnprintf(buf + i, 200 - i, "\n");
	}

	if (is_rx(c))
		i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n",
			       atomic_read(&c->peer_rx_active));
	return buf;
}

static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
				  char *buf)
{
	struct tipc_key *key = &old;
	int k, i = 0;
	char *s;

	/* Output format: "[%s %s %s] -> [%s %s %s]", max len = 32 */
again:
	i += scnprintf(buf + i, 32 - i, "[");
	for (k = KEY_1; k <= KEY_3; k++) {
		if (k == key->passive)
			s = "pas";
		else if (k == key->active)
			s = "act";
		else if (k == key->pending)
			s = "pen";
		else
			s = "-";
		i += scnprintf(buf + i, 32 - i,
			       (k != KEY_3) ? "%s " : "%s", s);
	}
	if (key != &new) {
		i += scnprintf(buf + i, 32 - i, "] -> ");
		key = &new;
		goto again;
	}
	i += scnprintf(buf + i, 32 - i, "]");
	return buf;
}

/**
 * tipc_crypto_msg_rcv - Common 'MSG_CRYPTO' processing point
 * @net: the struct net
 * @skb: the receiving message buffer
 */
void tipc_crypto_msg_rcv(struct net *net, struct sk_buff *skb)
{
	struct tipc_crypto *rx;
	struct tipc_msg *hdr;

	if (unlikely(skb_linearize(skb)))
		goto exit;

	hdr = buf_msg(skb);
	rx = tipc_node_crypto_rx_by_addr(net, msg_prevnode(hdr));
	if (unlikely(!rx))
		goto exit;

	switch (msg_type(hdr)) {
	case KEY_DISTR_MSG:
		if (tipc_crypto_key_rcv(rx, hdr))
			goto exit;
		break;
	default:
		break;
	}

	tipc_node_put(rx->node);
exit:
	kfree_skb(skb);
}

/**
 * tipc_crypto_key_distr - Distribute a TX key
 * @tx: the TX crypto
 * @key: the key's index
 * @dest: the destination tipc node, = NULL if distributing to all nodes
 *
 * Return: 0 in case of success, otherwise < 0
 */
int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key,
			  struct tipc_node *dest)
{
	struct tipc_aead *aead;
	u32 dnode = tipc_node_get_addr(dest);
	int rc = -ENOKEY;

	if (!sysctl_tipc_key_exchange_enabled)
		return 0;

	if (key) {
		rcu_read_lock();
		aead = tipc_aead_get(tx->aead[key]);
		if (likely(aead)) {
			rc = tipc_crypto_key_xmit(tx->net, aead->key,
						  aead->gen, aead->mode,
						  dnode);
			tipc_aead_put(aead);
		}
		rcu_read_unlock();
	}

	return rc;
}

/**
 * tipc_crypto_key_xmit - Send a session key
 * @net: the struct net
 * @skey: the session key to be sent
 * @gen: the key's generation
 * @mode: the key's mode
 * @dnode: the destination node address, = 0 if broadcasting to all nodes
 *
 * The session key 'skey' is packed into a TIPC v2 'MSG_CRYPTO/KEY_DISTR_MSG'
 * as its data section, then transmitted over the unicast or broadcast link.
 *
 * Return: 0 in case of success, otherwise < 0
 */
static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
				u16 gen, u8 mode, u32 dnode)
{
	struct sk_buff_head pkts;
	struct tipc_msg *hdr;
	struct sk_buff *skb;
	u16 size, cong_link_cnt;
	u8 *data;
	int rc;

	size = tipc_aead_key_size(skey);
	skb = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hdr = buf_msg(skb);
	tipc_msg_init(tipc_own_addr(net), hdr, MSG_CRYPTO, KEY_DISTR_MSG,
		      INT_H_SIZE, dnode);
	msg_set_size(hdr, INT_H_SIZE + size);
	msg_set_key_gen(hdr, gen);
	msg_set_key_mode(hdr, mode);
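
	/* Data section layout (matching tipc_crypto_key_rcv() on the peer):
	 *   bytes [0, TIPC_AEAD_ALG_NAME) : algorithm name
	 *   next 4 bytes                  : key length (__be32)
	 *   remaining skey->keylen bytes  : the key itself
	 */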
	data = msg_data(hdr);
	*((__be32 *)(data + TIPC_AEAD_ALG_NAME)) = htonl(skey->keylen);
	memcpy(data, skey->alg_name, TIPC_AEAD_ALG_NAME);
	memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->key,
	       skey->keylen);

	__skb_queue_head_init(&pkts);
	__skb_queue_tail(&pkts, skb);
	if (dnode)
		rc = tipc_node_xmit(net, &pkts, dnode, 0);
	else
		rc = tipc_bcast_xmit(net, &pkts, &cong_link_cnt);

	return rc;
}

/**
 * tipc_crypto_key_rcv - Receive a session key
 * @rx: the RX crypto
 * @hdr: the TIPC v2 message incl. the receiving session key in its data
 *
 * This function retrieves the session key in the message from peer, then
 * schedules an RX work to attach the key to the corresponding RX crypto.
 *
 * Return: "true" if the key has been scheduled for attaching, otherwise
 * "false".
 */
static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
{
	struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
	struct tipc_aead_key *skey = NULL;
	u16 key_gen = msg_key_gen(hdr);
	u32 size = msg_data_sz(hdr);
	u8 *data = msg_data(hdr);
	unsigned int keylen;
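
	/* On the wire the key material directly follows a struct
	 * tipc_aead_key header (alg_name + keylen field), so the data size
	 * must equal sizeof(struct tipc_aead_key) + keylen; the two checks
	 * below enforce exactly that.
	 */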

	/* Verify the message is big enough to carry a minimum-length key */
	if (unlikely(size < sizeof(struct tipc_aead_key) + TIPC_AEAD_KEYLEN_MIN)) {
		pr_debug("%s: message data size is too small\n", rx->name);
		goto exit;
	}

	keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));

	/* Verify the supplied size values */
	if (unlikely(keylen > TIPC_AEAD_KEY_SIZE_MAX ||
		     size != keylen + sizeof(struct tipc_aead_key))) {
		pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name);
		goto exit;
	}

	spin_lock(&rx->lock);
	if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) {
		pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name,
		       rx->skey, key_gen, rx->key_gen);
		goto exit_unlock;
	}

	/* Allocate memory for the key */
	skey = kmalloc(size, GFP_ATOMIC);
	if (unlikely(!skey)) {
		pr_err("%s: unable to allocate memory for skey\n", rx->name);
		goto exit_unlock;
	}

	/* Copy key from msg data */
	skey->keylen = keylen;
	memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME);
	memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32),
	       skey->keylen);

	rx->key_gen = key_gen;
	rx->skey_mode = msg_key_mode(hdr);
	rx->skey = skey;
	rx->nokey = 0;
	mb(); /* for nokey flag */

exit_unlock:
	spin_unlock(&rx->lock);

exit:
	/* Schedule the key attaching on this crypto */
	if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0)))
		return true;

	return false;
}

/**
 * tipc_crypto_work_rx - Scheduled RX works handler
 * @work: the struct RX work
 *
 * The function processes previously scheduled works, i.e. distributing the TX
 * key to a peer or attaching a received session key to the RX crypto.
 */
static void tipc_crypto_work_rx(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tipc_crypto *rx = container_of(dwork, struct tipc_crypto, work);
	struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
	unsigned long delay = msecs_to_jiffies(5000);
	bool resched = false;
	u8 key;
	int rc;
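
	/* The distribution state runs SCHED -> COMPL -> 0 across two work
	 * invocations: the first one sends the key and reschedules itself,
	 * the second one merely releases the COMPL state.
	 */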
	/* Case 1: Distribute TX key to peer if scheduled */
	if (atomic_cmpxchg(&rx->key_distr,
			   KEY_DISTR_SCHED,
			   KEY_DISTR_COMPL) == KEY_DISTR_SCHED) {
		/* Always pick the newest one for distributing */
		key = tx->key.pending ?: tx->key.active;
		rc = tipc_crypto_key_distr(tx, key, rx->node);
		if (unlikely(rc))
			pr_warn("%s: unable to distr key[%d] to %s, err %d\n",
				tx->name, key, tipc_node_get_id_str(rx->node),
				rc);

		/* Sched for key_distr releasing */
		resched = true;
	} else {
		atomic_cmpxchg(&rx->key_distr, KEY_DISTR_COMPL, 0);
	}

	/* Case 2: Attach a pending received session key from peer if any */
	if (rx->skey) {
		rc = tipc_crypto_key_init(rx, rx->skey, rx->skey_mode, false);
		if (unlikely(rc < 0))
			pr_warn("%s: unable to attach received skey, err %d\n",
				rx->name, rc);
		switch (rc) {
		case -EBUSY:
		case -ENOMEM:
			/* Resched the key attaching */
			resched = true;
			break;
		default:
			synchronize_rcu();
			kfree(rx->skey);
			rx->skey = NULL;
			break;
		}
	}

	if (resched && queue_delayed_work(tx->wq, &rx->work, delay))
		return;

	tipc_node_put(rx->node);
}

/**
 * tipc_crypto_rekeying_sched - (Re)schedule rekeying with or without a new
 *                              interval
 * @tx: TX crypto
 * @changed: if the rekeying needs to be rescheduled with a new interval
 * @new_intv: new rekeying interval (when "changed" = true)
 */
void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed,
				u32 new_intv)
{
	unsigned long delay;
	bool now = false;

	if (changed) {
		if (new_intv == TIPC_REKEYING_NOW)
			now = true;
		else
			tx->rekeying_intv = new_intv;
		cancel_delayed_work_sync(&tx->work);
	}
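
	/* tx->rekeying_intv is kept in minutes (hence the * 60 * 1000 ms
	 * conversion below); a zero interval disables rekeying unless an
	 * immediate run ("now") was requested.
	 */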
	if (tx->rekeying_intv || now) {
		delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000;
		queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay));
	}
}

/**
 * tipc_crypto_work_tx - Scheduled TX works handler
 * @work: the struct TX work
 *
 * The function processes the previously scheduled work, i.e. key rekeying, by
 * generating a new session key based on the current one, then attaching it to
 * the TX crypto and finally distributing it to peers. It also re-schedules
 * the rekeying if needed.
 */
static void tipc_crypto_work_tx(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tipc_crypto *tx = container_of(dwork, struct tipc_crypto, work);
	struct tipc_aead_key *skey = NULL;
	struct tipc_key key = tx->key;
	struct tipc_aead *aead;
	int rc = -ENOMEM;
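
	/* A still-pending key means the previous rekeying has not completed
	 * its key exchange yet; just retry later.
	 */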
	if (unlikely(key.pending))
		goto resched;

	/* Take current key as a template */
	rcu_read_lock();
	aead = rcu_dereference(tx->aead[key.active ?: KEY_MASTER]);
	if (unlikely(!aead)) {
		rcu_read_unlock();
		/* At least one key should exist for securing */
		return;
	}

	/* Let's duplicate it first */
	skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC);
	rcu_read_unlock();

	/* Now, generate new key, initiate & distribute it */
	if (likely(skey)) {
		rc = tipc_aead_key_generate(skey) ?:
		     tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false);
		if (likely(rc > 0))
			rc = tipc_crypto_key_distr(tx, rc, NULL);
		kfree_sensitive(skey);
	}

	if (unlikely(rc))
		pr_warn_ratelimited("%s: rekeying returns %d\n", tx->name, rc);

resched:
	/* Re-schedule rekeying if any */
	tipc_crypto_rekeying_sched(tx, false, 0);
}