  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Incremental bus scan, based on bus topology
  4. *
  5. * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
  6. */
  7. #include <linux/bug.h>
  8. #include <linux/errno.h>
  9. #include <linux/firewire.h>
  10. #include <linux/firewire-constants.h>
  11. #include <linux/jiffies.h>
  12. #include <linux/kernel.h>
  13. #include <linux/list.h>
  14. #include <linux/module.h>
  15. #include <linux/slab.h>
  16. #include <linux/spinlock.h>
  17. #include <linux/atomic.h>
  18. #include <asm/byteorder.h>
  19. #include "core.h"
  20. #include "phy-packet-definitions.h"
  21. #include <trace/events/firewire.h>
  22. static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
  23. {
  24. struct fw_node *node;
  25. node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
  26. if (node == NULL)
  27. return NULL;
  28. node->color = color;
  29. node->node_id = LOCAL_BUS | phy_packet_self_id_get_phy_id(sid);
  30. node->link_on = phy_packet_self_id_zero_get_link_active(sid);
  31. // NOTE: Only two bits, thus only for SCODE_100, SCODE_200, SCODE_400, and SCODE_BETA.
  32. node->phy_speed = phy_packet_self_id_zero_get_scode(sid);
  33. node->initiated_reset = phy_packet_self_id_zero_get_initiated_reset(sid);
  34. node->port_count = port_count;
  35. kref_init(&node->kref);
  36. INIT_LIST_HEAD(&node->link);
  37. return node;
  38. }
  39. /*
  40. * Compute the maximum hop count for this node and it's children. The
  41. * maximum hop count is the maximum number of connections between any
  42. * two nodes in the subtree rooted at this node. We need this for
  43. * setting the gap count. As we build the tree bottom up in
  44. * build_tree() below, this is fairly easy to do: for each node we
  45. * maintain the max hop count and the max depth, ie the number of hops
  46. * to the furthest leaf. Computing the max hop count breaks down into
  47. * two cases: either the path goes through this node, in which case
  48. * the hop count is the sum of the two biggest child depths plus 2.
  49. * Or it could be the case that the max hop path is entirely
  50. * containted in a child tree, in which case the max hop count is just
  51. * the max hop count of this child.
  52. */
  53. static void update_hop_count(struct fw_node *node)
  54. {
  55. int depths[2] = { -1, -1 };
  56. int max_child_hops = 0;
  57. int i;
  58. for (i = 0; i < node->port_count; i++) {
  59. if (node->ports[i] == NULL)
  60. continue;
  61. if (node->ports[i]->max_hops > max_child_hops)
  62. max_child_hops = node->ports[i]->max_hops;
  63. if (node->ports[i]->max_depth > depths[0]) {
  64. depths[1] = depths[0];
  65. depths[0] = node->ports[i]->max_depth;
  66. } else if (node->ports[i]->max_depth > depths[1])
  67. depths[1] = node->ports[i]->max_depth;
  68. }
  69. node->max_depth = depths[0] + 1;
  70. node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
  71. }
  72. static inline struct fw_node *fw_node(struct list_head *l)
  73. {
  74. return list_entry(l, struct fw_node, link);
  75. }
/*
 * This function builds the tree representation of the topology given
 * by the self IDs from the latest bus reset.  During the construction
 * of the tree, the function checks that the self IDs are valid and
 * internally consistent.  On success this function returns the
 * fw_node corresponding to the local card otherwise NULL.
 *
 * Self-ID sequences arrive in ascending PHY-ID order and children
 * always precede their parent, so the tree is built bottom-up with an
 * explicit stack of completed subtrees.
 */
static struct fw_node *build_tree(struct fw_card *card, const u32 *sid, int self_id_count,
				  unsigned int generation)
{
	struct self_id_sequence_enumerator enumerator = {
		.cursor = sid,
		.quadlet_count = self_id_count,
	};
	struct fw_node *node, *child, *local_node, *irm_node;
	struct list_head stack;
	int phy_id, stack_depth;
	int gap_count;
	bool beta_repeaters_present;

	local_node = NULL;
	node = NULL;
	INIT_LIST_HEAD(&stack);
	stack_depth = 0;
	phy_id = 0;
	irm_node = NULL;
	/* Gap count from the first self ID; every PHY is expected to report
	 * the same value (checked per node below). */
	gap_count = phy_packet_self_id_zero_get_gap_count(*sid);
	beta_repeaters_present = false;

	/* One iteration per node, i.e. per self-ID sequence. */
	while (enumerator.quadlet_count > 0) {
		unsigned int child_port_count = 0;
		unsigned int total_port_count = 0;
		unsigned int parent_count = 0;
		unsigned int quadlet_count;
		const u32 *self_id_sequence;
		unsigned int port_capacity;
		enum phy_packet_self_id_port_status port_status;
		unsigned int port_index;
		struct list_head *h;
		int i;

		self_id_sequence = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
		if (IS_ERR(self_id_sequence)) {
			/* -ENODATA just means we consumed everything; any
			 * other error indicates malformed extended self IDs. */
			if (PTR_ERR(self_id_sequence) != -ENODATA) {
				fw_err(card, "inconsistent extended self IDs: %ld\n",
				       PTR_ERR(self_id_sequence));
				return NULL;
			}
			break;
		}

		port_capacity = self_id_sequence_get_port_capacity(quadlet_count);
		trace_self_id_sequence(card->index, self_id_sequence, quadlet_count, generation);

		/* First pass: count child and total connected ports.
		 * CHILD falls through so it is counted in both tallies. */
		for (port_index = 0; port_index < port_capacity; ++port_index) {
			port_status = self_id_sequence_get_port_status(self_id_sequence, quadlet_count,
								       port_index);
			switch (port_status) {
			case PHY_PACKET_SELF_ID_PORT_STATUS_CHILD:
				++child_port_count;
				fallthrough;
			case PHY_PACKET_SELF_ID_PORT_STATUS_PARENT:
			case PHY_PACKET_SELF_ID_PORT_STATUS_NCONN:
				++total_port_count;
				fallthrough;
			case PHY_PACKET_SELF_ID_PORT_STATUS_NONE:
			default:
				break;
			}
		}

		/* PHY IDs must be consecutive starting from 0. */
		if (phy_id != phy_packet_self_id_get_phy_id(self_id_sequence[0])) {
			fw_err(card, "PHY ID mismatch in self ID: %d != %d\n",
			       phy_id, phy_packet_self_id_get_phy_id(self_id_sequence[0]));
			return NULL;
		}

		/* A node cannot claim more children than subtrees built so far. */
		if (child_port_count > stack_depth) {
			fw_err(card, "topology stack underflow\n");
			return NULL;
		}

		/*
		 * Seek back from the top of our stack to find the
		 * start of the child nodes for this node.
		 */
		for (i = 0, h = &stack; i < child_port_count; i++)
			h = h->prev;
		/*
		 * When the stack is empty, this yields an invalid value,
		 * but that pointer will never be dereferenced.
		 */
		child = fw_node(h);

		node = fw_node_create(self_id_sequence[0], total_port_count, card->color);
		if (node == NULL) {
			fw_err(card, "out of memory while building topology\n");
			return NULL;
		}

		if (phy_id == (card->node_id & 0x3f))
			local_node = node;

		if (phy_packet_self_id_zero_get_contender(self_id_sequence[0]))
			irm_node = node;

		/* Second pass: wire up the ports to the child subtrees. */
		for (port_index = 0; port_index < total_port_count; ++port_index) {
			port_status = self_id_sequence_get_port_status(self_id_sequence, quadlet_count,
								       port_index);
			switch (port_status) {
			case PHY_PACKET_SELF_ID_PORT_STATUS_PARENT:
				// Who's your daddy?  We don't know the parent node at this time, so
				// we temporarily abuse node->color for remembering the entry in
				// the node->ports array where the parent node should be.  Later,
				// when we handle the parent node, we fix up the reference.
				++parent_count;
				node->color = port_index;
				break;

			case PHY_PACKET_SELF_ID_PORT_STATUS_CHILD:
				node->ports[port_index] = child;
				// Fix up parent reference for this child node.
				child->ports[child->color] = node;
				child->color = card->color;
				child = fw_node(child->link.next);
				break;

			case PHY_PACKET_SELF_ID_PORT_STATUS_NCONN:
			case PHY_PACKET_SELF_ID_PORT_STATUS_NONE:
			default:
				break;
			}
		}

		// Check that the node reports exactly one parent port, except for the root, which
		// of course should have no parents.
		if ((enumerator.quadlet_count == 0 && parent_count != 0) ||
		    (enumerator.quadlet_count > 0 && parent_count != 1)) {
			fw_err(card, "parent port inconsistency for node %d: "
			       "parent_count=%d\n", phy_id, parent_count);
			return NULL;
		}

		/* Pop the child nodes off the stack and push the new node. */
		__list_del(h->prev, &stack);
		list_add_tail(&node->link, &stack);
		stack_depth += 1 - child_port_count;

		/* A beta-speed PHY with more than one connection repeats
		 * beta traffic; relevant for gap count optimization. */
		if (node->phy_speed == SCODE_BETA && parent_count + child_port_count > 1)
			beta_repeaters_present = true;

		// If PHYs report different gap counts, set an invalid count which will force a gap
		// count reconfiguration and a reset.
		if (phy_packet_self_id_zero_get_gap_count(self_id_sequence[0]) != gap_count)
			gap_count = 0;

		update_hop_count(node);

		phy_id++;
	}

	/* The last node processed (highest PHY ID) is the root. */
	card->root_node = node;
	card->irm_node = irm_node;
	card->gap_count = gap_count;
	card->beta_repeaters_present = beta_repeaters_present;

	return local_node;
}
/* Callback invoked once per node during a topology traversal. */
typedef void (*fw_node_callback_t)(struct fw_card * card,
				   struct fw_node * node,
				   struct fw_node * parent);

/*
 * Visit every node reachable from @root and invoke @callback on each.
 * The list doubles as the visit queue: children are appended while the
 * list is being iterated, so the loop naturally processes the whole
 * tree.  card->color marks visited nodes; a neighbor that already
 * carries the current color is this node's parent rather than an
 * unvisited child.  Each queued node holds an extra reference for the
 * duration of the traversal, dropped in the final loop.
 */
static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
			     fw_node_callback_t callback)
{
	struct list_head list;
	struct fw_node *node, *next, *child, *parent;
	int i;

	INIT_LIST_HEAD(&list);

	fw_node_get(root);
	list_add_tail(&root->link, &list);
	parent = NULL;
	list_for_each_entry(node, &list, link) {
		node->color = card->color;

		for (i = 0; i < node->port_count; i++) {
			child = node->ports[i];
			if (!child)
				continue;
			if (child->color == card->color)
				parent = child;
			else {
				/* Take a traversal reference and enqueue. */
				fw_node_get(child);
				list_add_tail(&child->link, &list);
			}
		}

		callback(card, node, parent);
	}

	/* Drop the references taken while queueing. */
	list_for_each_entry_safe(node, next, &list, link)
		fw_node_put(node);
}
/*
 * for_each_fw_node() callback for nodes that disappeared: emit the
 * DESTROYED event, then drop the traversal's reference to the node.
 */
static void report_lost_node(struct fw_card *card,
			     struct fw_node *node, struct fw_node *parent)
{
	fw_node_event(card, node, FW_NODE_DESTROYED);
	fw_node_put(node);

	/* Topology has changed - reset bus manager retry counter */
	card->bm_retries = 0;
}
  261. static void report_found_node(struct fw_card *card,
  262. struct fw_node *node, struct fw_node *parent)
  263. {
  264. int b_path = (node->phy_speed == SCODE_BETA);
  265. if (parent != NULL) {
  266. /* min() macro doesn't work here with gcc 3.4 */
  267. node->max_speed = parent->max_speed < node->phy_speed ?
  268. parent->max_speed : node->phy_speed;
  269. node->b_path = parent->b_path && b_path;
  270. } else {
  271. node->max_speed = node->phy_speed;
  272. node->b_path = b_path;
  273. }
  274. fw_node_event(card, node, FW_NODE_CREATED);
  275. /* Topology has changed - reset bus manager retry counter */
  276. card->bm_retries = 0;
  277. }
/*
 * Tear down the current topology: bump the color generation, report
 * every node reachable from the local node as destroyed (dropping the
 * tree's references), and forget the local node.
 *
 * Must be called with card->lock held.
 */
void fw_destroy_nodes(struct fw_card *card)
{
	card->color++;

	if (card->local_node != NULL)
		for_each_fw_node(card, card->local_node, report_lost_node);
	card->local_node = NULL;
}
  286. static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
  287. {
  288. struct fw_node *tree;
  289. int i;
  290. tree = node1->ports[port];
  291. node0->ports[port] = tree;
  292. for (i = 0; i < tree->port_count; i++) {
  293. if (tree->ports[i] == node1) {
  294. tree->ports[i] = node0;
  295. break;
  296. }
  297. }
  298. }
/*
 * Compare the old topology tree for card with the new one specified by root.
 * Queue the nodes and mark them as either found, lost or updated.
 * Update the nodes in the card topology tree as we go.
 *
 * Both trees are walked in lock step: list0 holds the old (kept) nodes,
 * list1 the corresponding new nodes, which are released one by one as
 * they are consumed.
 */
static void update_tree(struct fw_card *card, struct fw_node *root)
{
	struct list_head list0, list1;
	struct fw_node *node0, *node1, *next1;
	int i, event;

	INIT_LIST_HEAD(&list0);
	list_add_tail(&card->local_node->link, &list0);
	INIT_LIST_HEAD(&list1);
	list_add_tail(&root->link, &list1);

	node0 = fw_node(list0.next);
	node1 = fw_node(list1.next);

	while (&node0->link != &list0) {
		/* Matched nodes must agree on port count by construction. */
		WARN_ON(node0->port_count != node1->port_count);

		/* Classify what happened to this node between resets. */
		if (node0->link_on && !node1->link_on)
			event = FW_NODE_LINK_OFF;
		else if (!node0->link_on && node1->link_on)
			event = FW_NODE_LINK_ON;
		else if (node1->initiated_reset && node1->link_on)
			event = FW_NODE_INITIATED_RESET;
		else
			event = FW_NODE_UPDATED;

		/* Copy the fresh state from the new node onto the old one,
		 * which is the instance we keep. */
		node0->node_id = node1->node_id;
		node0->color = card->color;
		node0->link_on = node1->link_on;
		node0->initiated_reset = node1->initiated_reset;
		node0->max_hops = node1->max_hops;
		node1->color = card->color;
		fw_node_event(card, node0, event);

		/* Root/IRM pointers must track the kept (old) instances. */
		if (card->root_node == node1)
			card->root_node = node0;
		if (card->irm_node == node1)
			card->irm_node = node0;

		for (i = 0; i < node0->port_count; i++) {
			if (node0->ports[i] && node1->ports[i]) {
				/*
				 * This port didn't change, queue the
				 * connected node for further
				 * investigation.
				 */
				if (node0->ports[i]->color == card->color)
					continue;
				list_add_tail(&node0->ports[i]->link, &list0);
				list_add_tail(&node1->ports[i]->link, &list1);
			} else if (node0->ports[i]) {
				/*
				 * The nodes connected here were
				 * unplugged; unref the lost nodes and
				 * queue FW_NODE_LOST callbacks for
				 * them.
				 */
				for_each_fw_node(card, node0->ports[i],
						 report_lost_node);
				node0->ports[i] = NULL;
			} else if (node1->ports[i]) {
				/*
				 * One or more node were connected to
				 * this port. Move the new nodes into
				 * the tree and queue FW_NODE_CREATED
				 * callbacks for them.
				 */
				move_tree(node0, node1, i);
				for_each_fw_node(card, node0->ports[i],
						 report_found_node);
			}
		}

		/* Advance both cursors; the consumed new node is released. */
		node0 = fw_node(node0->link.next);
		next1 = fw_node(node1->link.next);
		fw_node_put(node1);
		node1 = next1;
	}
}
  375. static void update_topology_map(struct fw_card *card,
  376. u32 *self_ids, int self_id_count)
  377. {
  378. int node_count = (card->root_node->node_id & 0x3f) + 1;
  379. __be32 *map = card->topology_map;
  380. *map++ = cpu_to_be32((self_id_count + 2) << 16);
  381. *map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
  382. *map++ = cpu_to_be32((node_count << 16) | self_id_count);
  383. while (self_id_count--)
  384. *map++ = cpu_to_be32p(self_ids++);
  385. fw_compute_block_crc(card->topology_map);
  386. }
/*
 * Process a completed bus reset: record the new node ID and generation,
 * rebuild the topology tree from the self IDs, refresh the topology map,
 * and report node events by comparing against the previous topology.
 */
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
			      int self_id_count, u32 *self_ids, bool bm_abdicate)
{
	struct fw_node *local_node;

	trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count);

	guard(spinlock_irqsave)(&card->lock);

	/*
	 * If the selfID buffer is not the immediate successor of the
	 * previously processed one, we cannot reliably compare the
	 * old and new topologies.
	 */
	if (!is_next_generation(generation, card->generation) &&
	    card->local_node != NULL) {
		fw_destroy_nodes(card);
		card->bm_retries = 0;
	}

	card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
	card->node_id = node_id;
	/*
	 * Update node_id before generation to prevent anybody from using
	 * a stale node_id together with a current generation.
	 */
	smp_wmb();
	card->generation = generation;
	card->reset_jiffies = get_jiffies_64();
	card->bm_node_id  = 0xffff;
	card->bm_abdicate = bm_abdicate;
	/* Kick off bus manager arbitration for the new generation. */
	fw_schedule_bm_work(card, 0);

	local_node = build_tree(card, self_ids, self_id_count, generation);

	update_topology_map(card, self_ids, self_id_count);

	/* New color generation for the upcoming tree comparison/traversal. */
	card->color++;

	if (local_node == NULL) {
		fw_err(card, "topology build failed\n");
		/* FIXME: We need to issue a bus reset in this case. */
	} else if (card->local_node == NULL) {
		/* First topology after (re)initialization: everything is new. */
		card->local_node = local_node;
		for_each_fw_node(card, local_node, report_found_node);
	} else {
		/* Diff the new tree against the old one. */
		update_tree(card, local_node);
	}
}