flow_table.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 */

#include "flow.h"
#include "datapath.h"
#include "flow_netlink.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/cpumask.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <linux/sort.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

#define TBL_MIN_BUCKETS         1024
#define MASK_ARRAY_SIZE_MIN     16
#define REHASH_INTERVAL         (10 * 60 * HZ)

#define MC_DEFAULT_HASH_ENTRIES 256
#define MC_HASH_SHIFT           8
#define MC_HASH_SEGS            ((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)

static struct kmem_cache *flow_cache;
struct kmem_cache *flow_stats_cache __read_mostly;

static u16 range_n_bytes(const struct sw_flow_key_range *range)
{
        return range->end - range->start;
}

void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       bool full, const struct sw_flow_mask *mask)
{
        int start = full ? 0 : mask->range.start;
        int len = full ? sizeof *dst : range_n_bytes(&mask->range);
        const long *m = (const long *)((const u8 *)&mask->key + start);
        const long *s = (const long *)((const u8 *)src + start);
        long *d = (long *)((u8 *)dst + start);
        int i;

        /* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
         * if 'full' is false the memory outside of the 'mask->range' is left
         * uninitialized. This can be used as an optimization when further
         * operations on 'dst' only use contents within 'mask->range'.
         */
        for (i = 0; i < len; i += sizeof(long))
                *d++ = *s++ & *m++;
}
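
/* Allocate a new flow together with its default stats node (CPU 0). The
 * cpumask that tracks which CPUs hold statistics lives in the same
 * allocation, right behind the per-CPU stats pointer array.
 */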
struct sw_flow *ovs_flow_alloc(void)
{
        struct sw_flow *flow;
        struct sw_flow_stats *stats;

        flow = kmem_cache_zalloc(flow_cache, GFP_KERNEL);
        if (!flow)
                return ERR_PTR(-ENOMEM);

        flow->stats_last_writer = -1;
        flow->cpu_used_mask = (struct cpumask *)&flow->stats[nr_cpu_ids];

        /* Initialize the default stat node. */
        stats = kmem_cache_alloc_node(flow_stats_cache,
                                      GFP_KERNEL | __GFP_ZERO,
                                      node_online(0) ? 0 : NUMA_NO_NODE);
        if (!stats)
                goto err;

        spin_lock_init(&stats->lock);

        RCU_INIT_POINTER(flow->stats[0], stats);

        cpumask_set_cpu(0, flow->cpu_used_mask);

        return flow;
err:
        kmem_cache_free(flow_cache, flow);
        return ERR_PTR(-ENOMEM);
}

int ovs_flow_tbl_count(const struct flow_table *table)
{
        return table->count;
}

static void flow_free(struct sw_flow *flow)
{
        int cpu;

        if (ovs_identifier_is_key(&flow->id))
                kfree(flow->id.unmasked_key);
        if (flow->sf_acts)
                ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
                                          flow->sf_acts);
        /* We open code this to make sure cpu 0 is always considered */
        for (cpu = 0; cpu < nr_cpu_ids;
             cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
                if (flow->stats[cpu])
                        kmem_cache_free(flow_stats_cache,
                                        (struct sw_flow_stats __force *)flow->stats[cpu]);
        }

        kmem_cache_free(flow_cache, flow);
}

static void rcu_free_flow_callback(struct rcu_head *rcu)
{
        struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

        flow_free(flow);
}
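
/* Free a flow either immediately or, when 'deferred' is true, after an RCU
 * grace period so concurrent RCU readers are not disturbed.
 */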
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
        if (!flow)
                return;

        if (deferred)
                call_rcu(&flow->rcu, rcu_free_flow_callback);
        else
                flow_free(flow);
}

static void __table_instance_destroy(struct table_instance *ti)
{
        kvfree(ti->buckets);
        kfree(ti);
}
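
/* Allocate a table instance with 'new_size' hash buckets and a fresh random
 * hash seed.
 */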
static struct table_instance *table_instance_alloc(int new_size)
{
        struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
        int i;

        if (!ti)
                return NULL;

        ti->buckets = kvmalloc_array(new_size, sizeof(struct hlist_head),
                                     GFP_KERNEL);
        if (!ti->buckets) {
                kfree(ti);
                return NULL;
        }

        for (i = 0; i < new_size; i++)
                INIT_HLIST_HEAD(&ti->buckets[i]);

        ti->n_buckets = new_size;
        ti->node_ver = 0;
        get_random_bytes(&ti->hash_seed, sizeof(u32));

        return ti;
}

static void __mask_array_destroy(struct mask_array *ma)
{
        free_percpu(ma->masks_usage_stats);
        kfree(ma);
}

static void mask_array_rcu_cb(struct rcu_head *rcu)
{
        struct mask_array *ma = container_of(rcu, struct mask_array, rcu);

        __mask_array_destroy(ma);
}

static void tbl_mask_array_reset_counters(struct mask_array *ma)
{
        int i, cpu;

        /* As the per-CPU counters are not atomic, we cannot simply reset
         * them from another CPU. To still get an approximately zero-based
         * counter, we store the value at reset time and subtract it later
         * when processing.
         */
        for (i = 0; i < ma->max; i++) {
                ma->masks_usage_zero_cntr[i] = 0;

                for_each_possible_cpu(cpu) {
                        struct mask_array_stats *stats;
                        unsigned int start;
                        u64 counter;

                        stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
                        do {
                                start = u64_stats_fetch_begin(&stats->syncp);
                                counter = stats->usage_cntrs[i];
                        } while (u64_stats_fetch_retry(&stats->syncp, start));

                        ma->masks_usage_zero_cntr[i] += counter;
                }
        }
}
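
/* Allocate a mask array able to hold up to 'size' masks (at least
 * MASK_ARRAY_SIZE_MIN). The zero-point counters are placed directly behind
 * the mask pointer array; the per-CPU usage statistics are allocated
 * separately.
 */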
static struct mask_array *tbl_mask_array_alloc(int size)
{
        struct mask_array *new;

        size = max(MASK_ARRAY_SIZE_MIN, size);
        new = kzalloc(struct_size(new, masks, size) +
                      sizeof(u64) * size, GFP_KERNEL);
        if (!new)
                return NULL;

        new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
                                             struct_size(new, masks, size));

        new->masks_usage_stats = __alloc_percpu(sizeof(struct mask_array_stats) +
                                                sizeof(u64) * size,
                                                __alignof__(u64));
        if (!new->masks_usage_stats) {
                kfree(new);
                return NULL;
        }

        new->count = 0;
        new->max = size;

        return new;
}

static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
{
        struct mask_array *old;
        struct mask_array *new;

        new = tbl_mask_array_alloc(size);
        if (!new)
                return -ENOMEM;

        old = ovsl_dereference(tbl->mask_array);
        if (old) {
                int i;

                for (i = 0; i < old->max; i++) {
                        if (ovsl_dereference(old->masks[i]))
                                new->masks[new->count++] = old->masks[i];
                }
                call_rcu(&old->rcu, mask_array_rcu_cb);
        }

        rcu_assign_pointer(tbl->mask_array, new);

        return 0;
}
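
/* Append 'new' to the mask array, growing the array first if it is full. */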
static int tbl_mask_array_add_mask(struct flow_table *tbl,
                                   struct sw_flow_mask *new)
{
        struct mask_array *ma = ovsl_dereference(tbl->mask_array);
        int err, ma_count = READ_ONCE(ma->count);

        if (ma_count >= ma->max) {
                err = tbl_mask_array_realloc(tbl, ma->max +
                                                  MASK_ARRAY_SIZE_MIN);
                if (err)
                        return err;

                ma = ovsl_dereference(tbl->mask_array);
        } else {
                /* On every add or delete we need to reset the counters so
                 * every new mask gets a fair chance of being prioritized.
                 */
                tbl_mask_array_reset_counters(ma);
        }

        BUG_ON(ovsl_dereference(ma->masks[ma_count]));

        rcu_assign_pointer(ma->masks[ma_count], new);
        WRITE_ONCE(ma->count, ma_count + 1);

        return 0;
}

static void tbl_mask_array_del_mask(struct flow_table *tbl,
                                    struct sw_flow_mask *mask)
{
        struct mask_array *ma = ovsl_dereference(tbl->mask_array);
        int i, ma_count = READ_ONCE(ma->count);

        /* Remove the deleted mask pointers from the array */
        for (i = 0; i < ma_count; i++) {
                if (mask == ovsl_dereference(ma->masks[i]))
                        goto found;
        }

        BUG();
        return;

found:
        WRITE_ONCE(ma->count, ma_count - 1);

        rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
        RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);

        kfree_rcu(mask, rcu);

        /* Shrink the mask array if necessary. */
        if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
            ma_count <= (ma->max / 3))
                tbl_mask_array_realloc(tbl, ma->max / 2);
        else
                tbl_mask_array_reset_counters(ma);
}

/* Remove 'mask' from the mask list, if it is not needed any more. */
static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask)
{
        if (mask) {
                /* ovs-lock is required to protect mask-refcount and
                 * mask list.
                 */
                ASSERT_OVSL();
                BUG_ON(!mask->ref_count);
                mask->ref_count--;

                if (!mask->ref_count)
                        tbl_mask_array_del_mask(tbl, mask);
        }
}

static void __mask_cache_destroy(struct mask_cache *mc)
{
        free_percpu(mc->mask_cache);
        kfree(mc);
}

static void mask_cache_rcu_cb(struct rcu_head *rcu)
{
        struct mask_cache *mc = container_of(rcu, struct mask_cache, rcu);

        __mask_cache_destroy(mc);
}
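
/* Allocate a per-CPU mask cache with 'size' entries. 'size' must be zero or
 * a power of two and must fit in a per-CPU allocation; a size of zero
 * disables the cache.
 */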
static struct mask_cache *tbl_mask_cache_alloc(u32 size)
{
        struct mask_cache_entry __percpu *cache = NULL;
        struct mask_cache *new;

        /* Only allow 'size' to be zero or a power of two, and it must not
         * exceed the percpu allocation size.
         */
        if ((!is_power_of_2(size) && size != 0) ||
            (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
                return NULL;

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return NULL;

        new->cache_size = size;
        if (new->cache_size > 0) {
                cache = __alloc_percpu(array_size(sizeof(struct mask_cache_entry),
                                                  new->cache_size),
                                       __alignof__(struct mask_cache_entry));
                if (!cache) {
                        kfree(new);
                        return NULL;
                }
        }

        new->mask_cache = cache;
        return new;
}

int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size)
{
        struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);
        struct mask_cache *new;

        if (size == mc->cache_size)
                return 0;

        if ((!is_power_of_2(size) && size != 0) ||
            (size * sizeof(struct mask_cache_entry)) > PCPU_MIN_UNIT_SIZE)
                return -EINVAL;

        new = tbl_mask_cache_alloc(size);
        if (!new)
                return -ENOMEM;

        rcu_assign_pointer(table->mask_cache, new);
        call_rcu(&mc->rcu, mask_cache_rcu_cb);

        return 0;
}
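
/* Initialize a flow table: allocate the mask cache, the mask array and the
 * two table instances (one keyed by masked flow key, one keyed by UFID).
 */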
int ovs_flow_tbl_init(struct flow_table *table)
{
        struct table_instance *ti, *ufid_ti;
        struct mask_cache *mc;
        struct mask_array *ma;

        mc = tbl_mask_cache_alloc(MC_DEFAULT_HASH_ENTRIES);
        if (!mc)
                return -ENOMEM;

        ma = tbl_mask_array_alloc(MASK_ARRAY_SIZE_MIN);
        if (!ma)
                goto free_mask_cache;

        ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!ti)
                goto free_mask_array;

        ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!ufid_ti)
                goto free_ti;

        rcu_assign_pointer(table->ti, ti);
        rcu_assign_pointer(table->ufid_ti, ufid_ti);
        rcu_assign_pointer(table->mask_array, ma);
        rcu_assign_pointer(table->mask_cache, mc);
        table->last_rehash = jiffies;
        table->count = 0;
        table->ufid_count = 0;
        return 0;

free_ti:
        __table_instance_destroy(ti);
free_mask_array:
        __mask_array_destroy(ma);
free_mask_cache:
        __mask_cache_destroy(mc);
        return -ENOMEM;
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
        struct table_instance *ti;

        ti = container_of(rcu, struct table_instance, rcu);
        __table_instance_destroy(ti);
}

static void table_instance_flow_free(struct flow_table *table,
                                     struct table_instance *ti,
                                     struct table_instance *ufid_ti,
                                     struct sw_flow *flow)
{
        hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
        table->count--;

        if (ovs_identifier_is_ufid(&flow->id)) {
                hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
                table->ufid_count--;
        }

        flow_mask_remove(table, flow->mask);
}

/* Must be called with OVS mutex held. */
void table_instance_flow_flush(struct flow_table *table,
                               struct table_instance *ti,
                               struct table_instance *ufid_ti)
{
        int i;

        for (i = 0; i < ti->n_buckets; i++) {
                struct hlist_head *head = &ti->buckets[i];
                struct hlist_node *n;
                struct sw_flow *flow;

                hlist_for_each_entry_safe(flow, n, head,
                                          flow_table.node[ti->node_ver]) {
                        table_instance_flow_free(table, ti, ufid_ti,
                                                 flow);
                        ovs_flow_free(flow, true);
                }
        }

        if (WARN_ON(table->count != 0 ||
                    table->ufid_count != 0)) {
                table->count = 0;
                table->ufid_count = 0;
        }
}

static void table_instance_destroy(struct table_instance *ti,
                                   struct table_instance *ufid_ti)
{
        call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
        call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
}

/* No need for locking: this function is called from an RCU callback or the
 * error path.
 */
void ovs_flow_tbl_destroy(struct flow_table *table)
{
        struct table_instance *ti = rcu_dereference_raw(table->ti);
        struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
        struct mask_cache *mc = rcu_dereference_raw(table->mask_cache);
        struct mask_array *ma = rcu_dereference_raw(table->mask_array);

        call_rcu(&mc->rcu, mask_cache_rcu_cb);
        call_rcu(&ma->rcu, mask_array_rcu_cb);
        table_instance_destroy(ti, ufid_ti);
}
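
/* Return the next flow for a dump operation. '*bucket' and '*last' form the
 * iteration cursor and are updated so the caller can resume where it left
 * off.
 */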
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
                                       u32 *bucket, u32 *last)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        int ver;
        int i;

        ver = ti->node_ver;
        while (*bucket < ti->n_buckets) {
                i = 0;
                head = &ti->buckets[*bucket];
                hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) {
                        if (i < *last) {
                                i++;
                                continue;
                        }
                        *last = i + 1;
                        return flow;
                }
                (*bucket)++;
                *last = 0;
        }

        return NULL;
}

static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash)
{
        hash = jhash_1word(hash, ti->hash_seed);
        return &ti->buckets[hash & (ti->n_buckets - 1)];
}

static void table_instance_insert(struct table_instance *ti,
                                  struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(ti, flow->flow_table.hash);
        hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head);
}

static void ufid_table_instance_insert(struct table_instance *ti,
                                       struct sw_flow *flow)
{
        struct hlist_head *head;

        head = find_bucket(ti, flow->ufid_table.hash);
        hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head);
}
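
/* Re-insert every flow from 'old' into 'new', flipping the node version so
 * the links in the old table stay intact for concurrent RCU readers.
 */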
static void flow_table_copy_flows(struct table_instance *old,
                                  struct table_instance *new, bool ufid)
{
        int old_ver;
        int i;

        old_ver = old->node_ver;
        new->node_ver = !old_ver;

        /* Insert in new table. */
        for (i = 0; i < old->n_buckets; i++) {
                struct sw_flow *flow;
                struct hlist_head *head = &old->buckets[i];

                if (ufid)
                        hlist_for_each_entry_rcu(flow, head,
                                                 ufid_table.node[old_ver],
                                                 lockdep_ovsl_is_held())
                                ufid_table_instance_insert(new, flow);
                else
                        hlist_for_each_entry_rcu(flow, head,
                                                 flow_table.node[old_ver],
                                                 lockdep_ovsl_is_held())
                                table_instance_insert(new, flow);
        }
}

static struct table_instance *table_instance_rehash(struct table_instance *ti,
                                                    int n_buckets, bool ufid)
{
        struct table_instance *new_ti;

        new_ti = table_instance_alloc(n_buckets);
        if (!new_ti)
                return NULL;

        flow_table_copy_flows(ti, new_ti, ufid);

        return new_ti;
}

int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
        struct table_instance *old_ti, *new_ti;
        struct table_instance *old_ufid_ti, *new_ufid_ti;

        new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!new_ti)
                return -ENOMEM;
        new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS);
        if (!new_ufid_ti)
                goto err_free_ti;

        old_ti = ovsl_dereference(flow_table->ti);
        old_ufid_ti = ovsl_dereference(flow_table->ufid_ti);

        rcu_assign_pointer(flow_table->ti, new_ti);
        rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
        flow_table->last_rehash = jiffies;

        table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
        table_instance_destroy(old_ti, old_ufid_ti);
        return 0;

err_free_ti:
        __table_instance_destroy(new_ti);
        return -ENOMEM;
}
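
/* Hash only the bytes of 'key' that fall within 'range'. */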
static u32 flow_hash(const struct sw_flow_key *key,
                     const struct sw_flow_key_range *range)
{
        const u32 *hash_key = (const u32 *)((const u8 *)key + range->start);

        /* Make sure the number of hash bytes is a multiple of u32. */
        int hash_u32s = range_n_bytes(range) >> 2;

        return jhash2(hash_key, hash_u32s, 0);
}

static int flow_key_start(const struct sw_flow_key *key)
{
        if (key->tun_proto)
                return 0;
        else
                return rounddown(offsetof(struct sw_flow_key, phy),
                                 sizeof(long));
}

static bool cmp_key(const struct sw_flow_key *key1,
                    const struct sw_flow_key *key2,
                    int key_start, int key_end)
{
        const long *cp1 = (const long *)((const u8 *)key1 + key_start);
        const long *cp2 = (const long *)((const u8 *)key2 + key_start);
        int i;

        for (i = key_start; i < key_end; i += sizeof(long))
                if (*cp1++ ^ *cp2++)
                        return false;

        return true;
}

static bool flow_cmp_masked_key(const struct sw_flow *flow,
                                const struct sw_flow_key *key,
                                const struct sw_flow_key_range *range)
{
        return cmp_key(&flow->key, key, range->start, range->end);
}

static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
                                      const struct sw_flow_match *match)
{
        struct sw_flow_key *key = match->key;
        int key_start = flow_key_start(key);
        int key_end = match->range.end;

        BUG_ON(ovs_identifier_is_ufid(&flow->id));
        return cmp_key(flow->id.unmasked_key, key, key_start, key_end);
}
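
/* Look up the flow matching 'unmasked' under a single 'mask': mask the key,
 * hash the masked bytes and search the corresponding bucket.
 */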
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
                                          const struct sw_flow_key *unmasked,
                                          const struct sw_flow_mask *mask,
                                          u32 *n_mask_hit)
{
        struct sw_flow *flow;
        struct hlist_head *head;
        u32 hash;
        struct sw_flow_key masked_key;

        ovs_flow_mask_key(&masked_key, unmasked, false, mask);
        hash = flow_hash(&masked_key, &mask->range);
        head = find_bucket(ti, hash);
        (*n_mask_hit)++;

        hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
                                 lockdep_ovsl_is_held()) {
                if (flow->mask == mask && flow->flow_table.hash == hash &&
                    flow_cmp_masked_key(flow, &masked_key, &mask->range))
                        return flow;
        }
        return NULL;
}

/* Flow lookup does a full lookup on the flow table. It starts with the mask
 * at the index passed in via '*index'.
 * This function MUST be called with BH disabled due to the use of per-CPU
 * variables.
 */
static struct sw_flow *flow_lookup(struct flow_table *tbl,
                                   struct table_instance *ti,
                                   struct mask_array *ma,
                                   const struct sw_flow_key *key,
                                   u32 *n_mask_hit,
                                   u32 *n_cache_hit,
                                   u32 *index)
{
        struct mask_array_stats *stats = this_cpu_ptr(ma->masks_usage_stats);
        struct sw_flow *flow;
        struct sw_flow_mask *mask;
        int i;

        if (likely(*index < ma->max)) {
                mask = rcu_dereference_ovsl(ma->masks[*index]);
                if (mask) {
                        flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
                        if (flow) {
                                u64_stats_update_begin(&stats->syncp);
                                stats->usage_cntrs[*index]++;
                                u64_stats_update_end(&stats->syncp);
                                (*n_cache_hit)++;
                                return flow;
                        }
                }
        }

        for (i = 0; i < ma->max; i++) {
                if (i == *index)
                        continue;

                mask = rcu_dereference_ovsl(ma->masks[i]);
                if (unlikely(!mask))
                        break;

                flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
                if (flow) { /* Found */
                        *index = i;
                        u64_stats_update_begin(&stats->syncp);
                        stats->usage_cntrs[*index]++;
                        u64_stats_update_end(&stats->syncp);
                        return flow;
                }
        }

        return NULL;
}

/*
 * The mask_cache maps a flow to its probable mask. It is not a tightly
 * coupled cache, so updates to the mask list can leave stale entries in
 * the mask cache.
 * The cache is per-CPU and is divided into MC_HASH_SEGS segments.
 * In case of a hash collision the entry is hashed into the next segment.
 */
struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
                                          const struct sw_flow_key *key,
                                          u32 skb_hash,
                                          u32 *n_mask_hit,
                                          u32 *n_cache_hit)
{
        struct mask_cache *mc = rcu_dereference(tbl->mask_cache);
        struct mask_array *ma = rcu_dereference(tbl->mask_array);
        struct table_instance *ti = rcu_dereference(tbl->ti);
        struct mask_cache_entry *entries, *ce;
        struct sw_flow *flow;
        u32 hash;
        int seg;

        *n_mask_hit = 0;
        *n_cache_hit = 0;
        if (unlikely(!skb_hash || mc->cache_size == 0)) {
                u32 mask_index = 0;
                u32 cache = 0;

                return flow_lookup(tbl, ti, ma, key, n_mask_hit, &cache,
                                   &mask_index);
        }

        /* Pre- and post-recirculation flows usually have the same skb_hash
         * value. To avoid hash collisions, rehash the 'skb_hash' with
         * 'recirc_id'.
         */
        if (key->recirc_id)
                skb_hash = jhash_1word(skb_hash, key->recirc_id);

        ce = NULL;
        hash = skb_hash;
        entries = this_cpu_ptr(mc->mask_cache);

        /* Find the cache entry 'ce' to operate on. */
        for (seg = 0; seg < MC_HASH_SEGS; seg++) {
                int index = hash & (mc->cache_size - 1);
                struct mask_cache_entry *e;

                e = &entries[index];
                if (e->skb_hash == skb_hash) {
                        flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
                                           n_cache_hit, &e->mask_index);
                        if (!flow)
                                e->skb_hash = 0;
                        return flow;
                }

                if (!ce || e->skb_hash < ce->skb_hash)
                        ce = e; /* A better replacement cache candidate. */

                hash >>= MC_HASH_SHIFT;
        }

        /* Cache miss, do full lookup. */
        flow = flow_lookup(tbl, ti, ma, key, n_mask_hit, n_cache_hit,
                           &ce->mask_index);
        if (flow)
                ce->skb_hash = skb_hash;

        *n_cache_hit = 0;
        return flow;
}
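
/* Flow table lookup without the skb_hash based mask cache. Used outside the
 * packet processing fast path, e.g. for netlink requests.
 */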
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
                                    const struct sw_flow_key *key)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
        u32 __always_unused n_mask_hit;
        u32 __always_unused n_cache_hit;
        struct sw_flow *flow;
        u32 index = 0;

        /* This function gets called through the netlink interface and
         * therefore is preemptible. However, flow_lookup() needs to be called
         * with BH disabled due to CPU specific variables.
         */
        local_bh_disable();
        flow = flow_lookup(tbl, ti, ma, key, &n_mask_hit, &n_cache_hit, &index);
        local_bh_enable();
        return flow;
}
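
/* Look up a flow whose unmasked key matches 'match' exactly, trying every
 * mask in the mask array. Always called under the OVS mutex.
 */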
struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
                                          const struct sw_flow_match *match)
{
        struct mask_array *ma = ovsl_dereference(tbl->mask_array);
        int i;

        /* Always called under ovs-mutex. */
        for (i = 0; i < ma->max; i++) {
                struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
                u32 __always_unused n_mask_hit;
                struct sw_flow_mask *mask;
                struct sw_flow *flow;

                mask = ovsl_dereference(ma->masks[i]);
                if (!mask)
                        continue;

                flow = masked_flow_lookup(ti, match->key, mask, &n_mask_hit);
                if (flow && ovs_identifier_is_key(&flow->id) &&
                    ovs_flow_cmp_unmasked_key(flow, match)) {
                        return flow;
                }
        }

        return NULL;
}

static u32 ufid_hash(const struct sw_flow_id *sfid)
{
        return jhash(sfid->ufid, sfid->ufid_len, 0);
}

static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
                              const struct sw_flow_id *sfid)
{
        if (flow->id.ufid_len != sfid->ufid_len)
                return false;

        return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}

bool ovs_flow_cmp(const struct sw_flow *flow,
                  const struct sw_flow_match *match)
{
        if (ovs_identifier_is_ufid(&flow->id))
                return flow_cmp_masked_key(flow, match->key, &match->range);

        return ovs_flow_cmp_unmasked_key(flow, match);
}
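
/* Look up a flow by its unique flow identifier (UFID) in the UFID table
 * instance.
 */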
struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
                                         const struct sw_flow_id *ufid)
{
        struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti);
        struct sw_flow *flow;
        struct hlist_head *head;
        u32 hash;

        hash = ufid_hash(ufid);
        head = find_bucket(ti, hash);
        hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
                                 lockdep_ovsl_is_held()) {
                if (flow->ufid_table.hash == hash &&
                    ovs_flow_cmp_ufid(flow, ufid))
                        return flow;
        }
        return NULL;
}

int ovs_flow_tbl_num_masks(const struct flow_table *table)
{
        struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);

        return READ_ONCE(ma->count);
}

u32 ovs_flow_tbl_masks_cache_size(const struct flow_table *table)
{
        struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache);

        return READ_ONCE(mc->cache_size);
}

static struct table_instance *table_instance_expand(struct table_instance *ti,
                                                    bool ufid)
{
        return table_instance_rehash(ti, ti->n_buckets * 2, ufid);
}

/* Must be called with OVS mutex held. */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *ti = ovsl_dereference(table->ti);
        struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);

        BUG_ON(table->count == 0);
        table_instance_flow_free(table, ti, ufid_ti, flow);
}

static struct sw_flow_mask *mask_alloc(void)
{
        struct sw_flow_mask *mask;

        mask = kmalloc(sizeof(*mask), GFP_KERNEL);
        if (mask)
                mask->ref_count = 1;

        return mask;
}

static bool mask_equal(const struct sw_flow_mask *a,
                       const struct sw_flow_mask *b)
{
        const u8 *a_ = (const u8 *)&a->key + a->range.start;
        const u8 *b_ = (const u8 *)&b->key + b->range.start;

        return (a->range.end == b->range.end)
               && (a->range.start == b->range.start)
               && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
}

static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl,
                                           const struct sw_flow_mask *mask)
{
        struct mask_array *ma;
        int i;

        ma = ovsl_dereference(tbl->mask_array);
        for (i = 0; i < ma->max; i++) {
                struct sw_flow_mask *t;

                t = ovsl_dereference(ma->masks[i]);
                if (t && mask_equal(mask, t))
                        return t;
        }

        return NULL;
}

/* Add 'mask' into the mask list, if it is not already there. */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
                            const struct sw_flow_mask *new)
{
        struct sw_flow_mask *mask;

        mask = flow_mask_find(tbl, new);
        if (!mask) {
                /* Allocate a new mask if none exists. */
                mask = mask_alloc();
                if (!mask)
                        return -ENOMEM;

                mask->key = new->key;
                mask->range = new->range;

                /* Add mask to mask-list. */
                if (tbl_mask_array_add_mask(tbl, mask)) {
                        kfree(mask);
                        return -ENOMEM;
                }
        } else {
                BUG_ON(!mask->ref_count);
                mask->ref_count++;
        }

        flow->mask = mask;
        return 0;
}

/* Must be called with OVS mutex held. */
static void flow_key_insert(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *new_ti = NULL;
        struct table_instance *ti;

        flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range);
        ti = ovsl_dereference(table->ti);
        table_instance_insert(ti, flow);
        table->count++;

        /* Expand table, if necessary, to make room. */
        if (table->count > ti->n_buckets)
                new_ti = table_instance_expand(ti, false);
        else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
                new_ti = table_instance_rehash(ti, ti->n_buckets, false);

        if (new_ti) {
                rcu_assign_pointer(table->ti, new_ti);
                call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                table->last_rehash = jiffies;
        }
}

/* Must be called with OVS mutex held. */
static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow)
{
        struct table_instance *ti;

        flow->ufid_table.hash = ufid_hash(&flow->id);
        ti = ovsl_dereference(table->ufid_ti);
        ufid_table_instance_insert(ti, flow);
        table->ufid_count++;

        /* Expand table, if necessary, to make room. */
        if (table->ufid_count > ti->n_buckets) {
                struct table_instance *new_ti;

                new_ti = table_instance_expand(ti, true);
                if (new_ti) {
                        rcu_assign_pointer(table->ufid_ti, new_ti);
                        call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
                }
        }
}

/* Must be called with OVS mutex held. */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
                        const struct sw_flow_mask *mask)
{
        int err;

        err = flow_mask_insert(table, flow, mask);
        if (err)
                return err;
        flow_key_insert(table, flow);
        if (ovs_identifier_is_ufid(&flow->id))
                flow_ufid_insert(table, flow);

        return 0;
}

static int compare_mask_and_count(const void *a, const void *b)
{
        const struct mask_count *mc_a = a;
        const struct mask_count *mc_b = b;

        return (s64)mc_b->counter - (s64)mc_a->counter;
}

/* Must be called with OVS mutex held. */
void ovs_flow_masks_rebalance(struct flow_table *table)
{
        struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
        struct mask_count *masks_and_count;
        struct mask_array *new;
        int masks_entries = 0;
        int i;

        /* Build array of all current entries with use counters. */
        masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
                                        GFP_KERNEL);
        if (!masks_and_count)
                return;

        for (i = 0; i < ma->max; i++) {
                struct sw_flow_mask *mask;
                int cpu;

                mask = rcu_dereference_ovsl(ma->masks[i]);
                if (unlikely(!mask))
                        break;

                masks_and_count[i].index = i;
                masks_and_count[i].counter = 0;

                for_each_possible_cpu(cpu) {
                        struct mask_array_stats *stats;
                        unsigned int start;
                        u64 counter;

                        stats = per_cpu_ptr(ma->masks_usage_stats, cpu);
                        do {
                                start = u64_stats_fetch_begin(&stats->syncp);
                                counter = stats->usage_cntrs[i];
                        } while (u64_stats_fetch_retry(&stats->syncp, start));

                        masks_and_count[i].counter += counter;
                }

                /* Subtract the zero count value. */
                masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];

                /* Rather than calling tbl_mask_array_reset_counters()
                 * below when no change is needed, do it inline here.
                 */
                ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
        }

        if (i == 0)
                goto free_mask_entries;

        /* Sort the entries */
        masks_entries = i;
        sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
             compare_mask_and_count, NULL);

        /* If the order is the same, nothing to do... */
        for (i = 0; i < masks_entries; i++) {
                if (i != masks_and_count[i].index)
                        break;
        }
        if (i == masks_entries)
                goto free_mask_entries;

        /* Rebuild the new list in order of usage. */
        new = tbl_mask_array_alloc(ma->max);
        if (!new)
                goto free_mask_entries;

        for (i = 0; i < masks_entries; i++) {
                int index = masks_and_count[i].index;

                if (ovsl_dereference(ma->masks[index]))
                        new->masks[new->count++] = ma->masks[index];
        }

        rcu_assign_pointer(table->mask_array, new);
        call_rcu(&ma->rcu, mask_array_rcu_cb);

free_mask_entries:
        kfree(masks_and_count);
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code.
 */
int ovs_flow_init(void)
{
        BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
        BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));

        flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
                                       + (nr_cpu_ids
                                          * sizeof(struct sw_flow_stats *))
                                       + cpumask_size(),
                                       0, 0, NULL);
        if (flow_cache == NULL)
                return -ENOMEM;

        flow_stats_cache
                = kmem_cache_create("sw_flow_stats", sizeof(struct sw_flow_stats),
                                    0, SLAB_HWCACHE_ALIGN, NULL);
        if (flow_stats_cache == NULL) {
                kmem_cache_destroy(flow_cache);
                flow_cache = NULL;
                return -ENOMEM;
        }

        return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
        kmem_cache_destroy(flow_stats_cache);
        kmem_cache_destroy(flow_cache);
}