  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
  4. */
  5. #include <linux/bug.h>
  6. #include <linux/completion.h>
  7. #include <linux/crc-itu-t.h>
  8. #include <linux/device.h>
  9. #include <linux/errno.h>
  10. #include <linux/firewire.h>
  11. #include <linux/firewire-constants.h>
  12. #include <linux/jiffies.h>
  13. #include <linux/kernel.h>
  14. #include <linux/kref.h>
  15. #include <linux/list.h>
  16. #include <linux/module.h>
  17. #include <linux/mutex.h>
  18. #include <linux/spinlock.h>
  19. #include <linux/workqueue.h>
  20. #include <linux/atomic.h>
  21. #include <asm/byteorder.h>
  22. #include "core.h"
  23. #include <trace/events/firewire.h>
/*
 * Generate a printk wrapper (fw_err(), fw_notice()) that prefixes each
 * message with the module name and the card's device name.  The wrapper
 * forwards its varargs through a struct va_format ("%pV").
 */
#define define_fw_printk_level(func, kern_level)		\
void func(const struct fw_card *card, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
	printk(kern_level KBUILD_MODNAME " %s: %pV",		\
	       dev_name(card->device), &vaf);			\
	va_end(args);						\
}
define_fw_printk_level(fw_err, KERN_ERR);
define_fw_printk_level(fw_notice, KERN_NOTICE);
  39. int fw_compute_block_crc(__be32 *block)
  40. {
  41. int length;
  42. u16 crc;
  43. length = (be32_to_cpu(block[0]) >> 16) & 0xff;
  44. crc = crc_itu_t(0, (u8 *)&block[1], length * 4);
  45. *block |= cpu_to_be32(crc);
  46. return length;
  47. }
/* Serializes card_list, descriptor_list, and config-ROM regeneration. */
static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);

static LIST_HEAD(descriptor_list);
/* Quadlets consumed by root-directory entries for all registered descriptors. */
static int descriptor_count;

/* Scratch image shared by all cards; only touched under card_mutex. */
static __be32 tmp_config_rom[256];
/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
static size_t config_rom_length = 1 + 4 + 1 + 1;
/* Bus information block field encodings (first quadlet). */
#define BIB_CRC(v)		((v) <<  0)
#define BIB_CRC_LENGTH(v)	((v) << 16)
#define BIB_INFO_LENGTH(v)	((v) << 24)
#define BIB_BUS_NAME		0x31333934 /* "1394" */
/* Bus information block field encodings (third quadlet). */
#define BIB_LINK_SPEED(v)	((v) <<  0)
#define BIB_GENERATION(v)	((v) <<  4)
#define BIB_MAX_ROM(v)		((v) <<  8)
#define BIB_MAX_RECEIVE(v)	((v) << 12)
#define BIB_CYC_CLK_ACC(v)	((v) << 16)
#define BIB_PMC			((1) << 27)
#define BIB_BMC			((1) << 28)
#define BIB_ISC			((1) << 29)
#define BIB_CMC			((1) << 30)
#define BIB_IRMC		((1) << 31)
#define NODE_CAPABILITIES	0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */

/*
 * IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms),
 * but we have to make it longer because there are many devices whose firmware
 * is just too slow for that.
 */
#define DEFAULT_SPLIT_TIMEOUT	(2 * 8000)

#define CANON_OUI		0x000085
/*
 * Build the complete config ROM image (bus info block, root directory, and
 * all registered descriptor blocks) into @config_rom, then compute the CRC
 * for every block in place.  Caller must hold card_mutex (protects the
 * descriptor list and the shared counters).
 */
static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
	struct fw_descriptor *desc;
	int i, j, k, length;

	/*
	 * Initialize contents of config rom buffer.  On the OHCI
	 * controller, block reads to the config rom accesses the host
	 * memory, but quadlet read access the hardware bus info block
	 * registers.  That's just crack, but it means we should make
	 * sure the contents of bus info block in host memory matches
	 * the version stored in the OHCI registers.
	 */

	config_rom[0] = cpu_to_be32(
		BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
	config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
	config_rom[2] = cpu_to_be32(
		BIB_LINK_SPEED(card->link_speed) |
		/* "% 14 + 2" keeps the generation field in the range 2..15. */
		BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
		BIB_MAX_ROM(2) |
		BIB_MAX_RECEIVE(card->max_receive) |
		BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
	config_rom[3] = cpu_to_be32(card->guid >> 32);
	config_rom[4] = cpu_to_be32(card->guid);

	/* Generate root directory. */
	config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
	i = 7;				/* next root-directory entry */
	j = 7 + descriptor_count;	/* where descriptor blocks begin */

	/* Generate root directory entries for descriptors. */
	list_for_each_entry (desc, &descriptor_list, link) {
		if (desc->immediate > 0)
			config_rom[i++] = cpu_to_be32(desc->immediate);
		/* Leaf/directory entry: key plus offset to the block at j. */
		config_rom[i] = cpu_to_be32(desc->key | (j - i));
		i++;
		j += desc->length;
	}

	/* Update root directory length. */
	config_rom[5] = cpu_to_be32((i - 5 - 1) << 16);

	/* End of root directory, now copy in descriptors. */
	list_for_each_entry (desc, &descriptor_list, link) {
		for (k = 0; k < desc->length; k++)
			config_rom[i + k] = cpu_to_be32(desc->data[k]);
		i += desc->length;
	}

	/* Calculate CRCs for all blocks in the config rom.  This
	 * assumes that CRC length and info length are identical for
	 * the bus info block, which is always the case for this
	 * implementation. */
	for (i = 0; i < j; i += length + 1)
		length = fw_compute_block_crc(config_rom + i);

	/* Total image size must match the bookkeeping in add/remove. */
	WARN_ON(j != config_rom_length);
}
  128. static void update_config_roms(void)
  129. {
  130. struct fw_card *card;
  131. list_for_each_entry (card, &card_list, link) {
  132. generate_config_rom(card, tmp_config_rom);
  133. card->driver->set_config_rom(card, tmp_config_rom,
  134. config_rom_length);
  135. }
  136. }
  137. static size_t required_space(struct fw_descriptor *desc)
  138. {
  139. /* descriptor + entry into root dir + optional immediate entry */
  140. return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
  141. }
  142. int fw_core_add_descriptor(struct fw_descriptor *desc)
  143. {
  144. size_t i;
  145. /*
  146. * Check descriptor is valid; the length of all blocks in the
  147. * descriptor has to add up to exactly the length of the
  148. * block.
  149. */
  150. i = 0;
  151. while (i < desc->length)
  152. i += (desc->data[i] >> 16) + 1;
  153. if (i != desc->length)
  154. return -EINVAL;
  155. guard(mutex)(&card_mutex);
  156. if (config_rom_length + required_space(desc) > 256)
  157. return -EBUSY;
  158. list_add_tail(&desc->link, &descriptor_list);
  159. config_rom_length += required_space(desc);
  160. descriptor_count++;
  161. if (desc->immediate > 0)
  162. descriptor_count++;
  163. update_config_roms();
  164. return 0;
  165. }
  166. EXPORT_SYMBOL(fw_core_add_descriptor);
  167. void fw_core_remove_descriptor(struct fw_descriptor *desc)
  168. {
  169. guard(mutex)(&card_mutex);
  170. list_del(&desc->link);
  171. config_rom_length -= required_space(desc);
  172. descriptor_count--;
  173. if (desc->immediate > 0)
  174. descriptor_count--;
  175. update_config_roms();
  176. }
  177. EXPORT_SYMBOL(fw_core_remove_descriptor);
  178. static int reset_bus(struct fw_card *card, bool short_reset)
  179. {
  180. int reg = short_reset ? 5 : 1;
  181. int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
  182. trace_bus_reset_initiate(card->index, card->generation, short_reset);
  183. return card->driver->update_phy_reg(card, reg, 0, bit);
  184. }
  185. void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
  186. {
  187. trace_bus_reset_schedule(card->index, card->generation, short_reset);
  188. /* We don't try hard to sort out requests of long vs. short resets. */
  189. card->br_short = short_reset;
  190. /* Use an arbitrary short delay to combine multiple reset requests. */
  191. fw_card_get(card);
  192. if (!queue_delayed_work(fw_workqueue, &card->br_work,
  193. delayed ? DIV_ROUND_UP(HZ, 100) : 0))
  194. fw_card_put(card);
  195. }
  196. EXPORT_SYMBOL(fw_schedule_bus_reset);
/* Delayed work scheduled by fw_schedule_bus_reset(): sends a PHY config
 * packet and performs the actual bus reset, postponing if the previous
 * reset was too recent.  Drops the card reference taken by the scheduler. */
static void br_work(struct work_struct *work)
{
	struct fw_card *card = container_of(work, struct fw_card, br_work.work);

	/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
	if (card->reset_jiffies != 0 &&
	    time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
		trace_bus_reset_postpone(card->index, card->generation, card->br_short);

		/* Re-queue ourselves; keep the reference unless already queued. */
		if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
			fw_card_put(card);
		return;
	}

	/* Preserve the current gap count across the reset. */
	fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation,
			   FW_PHY_CONFIG_CURRENT_GAP_COUNT);
	reset_bus(card, card->br_short);
	fw_card_put(card);
}
/*
 * Allocate isochronous channel 31 as the broadcast channel (once per card),
 * then notify all child devices.  Called from bm_work() when the local node
 * is IRM.  If channel 31 cannot be obtained, the notification is skipped.
 */
static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
	int channel, bandwidth = 0;

	if (!card->broadcast_channel_allocated) {
		/* Request channel 31 (bit 31 of the channel mask), no bandwidth. */
		fw_iso_resource_manage(card, generation, 1ULL << 31,
				       &channel, &bandwidth, true);
		if (channel != 31) {
			fw_notice(card, "failed to allocate broadcast channel\n");
			return;
		}
		card->broadcast_channel_allocated = true;
	}

	device_for_each_child(card->device, (void *)(long)generation,
			      fw_device_set_broadcast_channel);
}
/* Optimized gap count indexed by hop count between the two most distant
 * nodes; used by bm_work() ("1394a table E-1"). */
static const char gap_count_table[] = {
	63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
};
  231. void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
  232. {
  233. fw_card_get(card);
  234. if (!schedule_delayed_work(&card->bm_work, delay))
  235. fw_card_put(card);
  236. }
/*
 * Bus-manager work item, scheduled after each bus reset.  It attempts to
 * become bus manager via a lock transaction on the IRM's BUS_MANAGER_ID
 * register, verifies that the root node is a usable cycle master, optimizes
 * the gap count, and initiates another bus reset when root or gap count
 * should change.  Drops the card reference taken by fw_schedule_bm_work().
 */
static void bm_work(struct work_struct *work)
{
	struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
	struct fw_device *root_device, *irm_device;
	struct fw_node *root_node;
	int root_id, new_root_id, irm_id, bm_id, local_id;
	int gap_count, generation, grace, rcode;
	bool do_reset = false;
	bool root_device_is_running;
	bool root_device_is_cmc;
	bool irm_is_1394_1995_only;
	bool keep_this_irm;
	__be32 transaction_data[2];

	spin_lock_irq(&card->lock);

	if (card->local_node == NULL) {
		spin_unlock_irq(&card->lock);
		goto out_put_card;
	}

	generation = card->generation;

	root_node = card->root_node;
	fw_node_get(root_node);
	root_device = root_node->data;
	root_device_is_running = root_device &&
			atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
	root_device_is_cmc = root_device && root_device->cmc;

	irm_device = card->irm_node->data;
	/* 1995-only IRMs have all-zero bits 4..7 in the third config-ROM quadlet. */
	irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
			(irm_device->config_rom[2] & 0x000000f0) == 0;

	/* Canon MV5i works unreliably if it is not root node. */
	keep_this_irm = irm_device && irm_device->config_rom &&
			irm_device->config_rom[3] >> 8 == CANON_OUI;

	root_id = root_node->node_id;
	irm_id = card->irm_node->node_id;
	local_id = card->local_node->node_id;

	/* Have at least 125ms passed since the last bus reset? */
	grace = time_after64(get_jiffies_64(),
			     card->reset_jiffies + DIV_ROUND_UP(HZ, 8));

	if ((is_next_generation(generation, card->bm_generation) &&
	     !card->bm_abdicate) ||
	    (card->bm_generation != generation && grace)) {
		/*
		 * This first step is to figure out who is IRM and
		 * then try to become bus manager.  If the IRM is not
		 * well defined (e.g. does not have an active link
		 * layer or does not respond to our lock request), we
		 * will have to do a little vigilante bus management.
		 * In that case, we do a goto into the gap count logic
		 * so that when we do the reset, we still optimize the
		 * gap count.  That could well save a reset in the
		 * next generation.
		 */

		if (!card->irm_node->link_on) {
			new_root_id = local_id;
			fw_notice(card, "%s, making local node (%02x) root\n",
				  "IRM has link off", new_root_id);
			goto pick_me;
		}

		if (irm_is_1394_1995_only && !keep_this_irm) {
			new_root_id = local_id;
			fw_notice(card, "%s, making local node (%02x) root\n",
				  "IRM is not 1394a compliant", new_root_id);
			goto pick_me;
		}

		/* Compare-and-swap 0x3f ("no BM yet") with our own node number. */
		transaction_data[0] = cpu_to_be32(0x3f);
		transaction_data[1] = cpu_to_be32(local_id);

		spin_unlock_irq(&card->lock);

		rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
				irm_id, generation, SCODE_100,
				CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
				transaction_data, 8);

		if (rcode == RCODE_GENERATION)
			/* Another bus reset, BM work has been rescheduled. */
			goto out;

		bm_id = be32_to_cpu(transaction_data[0]);

		scoped_guard(spinlock_irq, &card->lock) {
			if (rcode == RCODE_COMPLETE && generation == card->generation)
				card->bm_node_id =
				    bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
		}

		if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
			/* Somebody else is BM.  Only act as IRM. */
			if (local_id == irm_id)
				allocate_broadcast_channel(card, generation);

			goto out;
		}

		if (rcode == RCODE_SEND_ERROR) {
			/*
			 * We have been unable to send the lock request due to
			 * some local problem.  Let's try again later and hope
			 * that the problem has gone away by then.
			 */
			fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));

			goto out;
		}

		spin_lock_irq(&card->lock);

		if (rcode != RCODE_COMPLETE && !keep_this_irm) {
			/*
			 * The lock request failed, maybe the IRM
			 * isn't really IRM capable after all.  Let's
			 * do a bus reset and pick the local node as
			 * root, and thus, IRM.
			 */
			new_root_id = local_id;
			fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
				  fw_rcode_string(rcode), new_root_id);
			goto pick_me;
		}
	} else if (card->bm_generation != generation) {
		/*
		 * We weren't BM in the last generation, and the last
		 * bus reset is less than 125ms ago.  Reschedule this job.
		 */
		spin_unlock_irq(&card->lock);
		fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
		goto out;
	}

	/*
	 * We're bus manager for this generation, so next step is to
	 * make sure we have an active cycle master and do gap count
	 * optimization.
	 */
	card->bm_generation = generation;

	if (card->gap_count == 0) {
		/*
		 * If self IDs have inconsistent gap counts, do a
		 * bus reset ASAP.  The config rom read might never
		 * complete, so don't wait for it.  However, still
		 * send a PHY configuration packet prior to the
		 * bus reset.  The PHY configuration packet might
		 * fail, but 1394-2008 8.4.5.2 explicitly permits
		 * it in this case, so it should be safe to try.
		 */
		new_root_id = local_id;

		/*
		 * We must always send a bus reset if the gap count
		 * is inconsistent, so bypass the 5-reset limit.
		 */
		card->bm_retries = 0;
	} else if (root_device == NULL) {
		/*
		 * Either link_on is false, or we failed to read the
		 * config rom.  In either case, pick another root.
		 */
		new_root_id = local_id;
	} else if (!root_device_is_running) {
		/*
		 * If we haven't probed this device yet, bail out now
		 * and let's try again once that's done.
		 */
		spin_unlock_irq(&card->lock);
		goto out;
	} else if (root_device_is_cmc) {
		/*
		 * We will send out a force root packet for this
		 * node as part of the gap count optimization.
		 */
		new_root_id = root_id;
	} else {
		/*
		 * Current root has an active link layer and we
		 * successfully read the config rom, but it's not
		 * cycle master capable.
		 */
		new_root_id = local_id;
	}

 pick_me:
	/*
	 * Pick a gap count from 1394a table E-1.  The table doesn't cover
	 * the typically much larger 1394b beta repeater delays though.
	 */
	if (!card->beta_repeaters_present &&
	    root_node->max_hops < ARRAY_SIZE(gap_count_table))
		gap_count = gap_count_table[root_node->max_hops];
	else
		gap_count = 63;

	/*
	 * Finally, figure out if we should do a reset or not.  If we have
	 * done less than 5 resets with the same physical topology and we
	 * have either a new root or a new gap count setting, let's do it.
	 */

	if (card->bm_retries++ < 5 &&
	    (card->gap_count != gap_count || new_root_id != root_id))
		do_reset = true;

	spin_unlock_irq(&card->lock);

	if (do_reset) {
		fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
			  new_root_id, gap_count);
		fw_send_phy_config(card, new_root_id, generation, gap_count);
		/*
		 * Where possible, use a short bus reset to minimize
		 * disruption to isochronous transfers.  But in the event
		 * of a gap count inconsistency, use a long bus reset.
		 *
		 * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
		 * may set different gap counts after a bus reset.  On a mixed
		 * 1394/1394a bus, a short bus reset can get doubled.  Some
		 * nodes may treat the double reset as one bus reset and others
		 * may treat it as two, causing a gap count inconsistency
		 * again.  Using a long bus reset prevents this.
		 */
		reset_bus(card, card->gap_count != 0);
		/* Will allocate broadcast channel after the reset. */
		goto out;
	}

	if (root_device_is_cmc) {
		/*
		 * Make sure that the cycle master sends cycle start packets.
		 */
		transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
		rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
				root_id, generation, SCODE_100,
				CSR_REGISTER_BASE + CSR_STATE_SET,
				transaction_data, 4);
		if (rcode == RCODE_GENERATION)
			goto out;
	}

	if (local_id == irm_id)
		allocate_broadcast_channel(card, generation);

 out:
	fw_node_put(root_node);
 out_put_card:
	fw_card_put(card);
}
/* One-time initialization of an fw_card before fw_card_add(); assigns a
 * unique card index and sets all per-card state to its defaults. */
void fw_card_initialize(struct fw_card *card,
			const struct fw_card_driver *driver,
			struct device *device)
{
	/* Monotonically increasing card index; the first card gets 0. */
	static atomic_t index = ATOMIC_INIT(-1);

	card->index = atomic_inc_return(&index);
	card->driver = driver;
	card->device = device;
	card->current_tlabel = 0;
	card->tlabel_mask = 0;
	/* Split DEFAULT_SPLIT_TIMEOUT (in 1/8000 s cycles) into seconds/cycles. */
	card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
	card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
	card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
	card->split_timeout_jiffies =
		DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
	card->color = 0;
	card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;

	kref_init(&card->kref);
	init_completion(&card->done);
	INIT_LIST_HEAD(&card->transaction_list);
	INIT_LIST_HEAD(&card->phy_receiver_list);
	spin_lock_init(&card->lock);

	card->local_node = NULL;

	INIT_DELAYED_WORK(&card->br_work, br_work);
	INIT_DELAYED_WORK(&card->bm_work, bm_work);
}
EXPORT_SYMBOL(fw_card_initialize);
  486. int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
  487. unsigned int supported_isoc_contexts)
  488. {
  489. struct workqueue_struct *isoc_wq;
  490. int ret;
  491. // This workqueue should be:
  492. // * != WQ_BH Sleepable.
  493. // * == WQ_UNBOUND Any core can process data for isoc context. The
  494. // implementation of unit protocol could consumes the core
  495. // longer somehow.
  496. // * != WQ_MEM_RECLAIM Not used for any backend of block device.
  497. // * == WQ_FREEZABLE Isochronous communication is at regular interval in real
  498. // time, thus should be drained if possible at freeze phase.
  499. // * == WQ_HIGHPRI High priority to process semi-realtime timestamped data.
  500. // * == WQ_SYSFS Parameters are available via sysfs.
  501. // * max_active == n_it + n_ir A hardIRQ could notify events for multiple isochronous
  502. // contexts if they are scheduled to the same cycle.
  503. isoc_wq = alloc_workqueue("firewire-isoc-card%u",
  504. WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
  505. supported_isoc_contexts, card->index);
  506. if (!isoc_wq)
  507. return -ENOMEM;
  508. card->max_receive = max_receive;
  509. card->link_speed = link_speed;
  510. card->guid = guid;
  511. guard(mutex)(&card_mutex);
  512. generate_config_rom(card, tmp_config_rom);
  513. ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
  514. if (ret < 0) {
  515. destroy_workqueue(isoc_wq);
  516. return ret;
  517. }
  518. card->isoc_wq = isoc_wq;
  519. list_add_tail(&card->link, &card_list);
  520. return 0;
  521. }
  522. EXPORT_SYMBOL(fw_card_add);
/*
 * The next few functions implement a dummy driver that is used once a card
 * driver shuts down an fw_card.  This allows the driver to cleanly unload,
 * as all IO to the card will be handled (and failed) by the dummy driver
 * instead of calling into the module.  Only functions for iso context
 * shutdown still need to be provided by the card driver.
 *
 * .read/write_csr() should never be called anymore after the dummy driver
 * was bound since they are only used within request handler context.
 * .set_config_rom() is never called since the card is taken out of card_list
 * before switching to the dummy driver.
 */

/* PHY register access fails: the hardware is gone. */
static int dummy_read_phy_reg(struct fw_card *card, int address)
{
	return -ENODEV;
}

static int dummy_update_phy_reg(struct fw_card *card, int address,
				int clear_bits, int set_bits)
{
	return -ENODEV;
}

/* Fail outgoing packets immediately through their completion callback. */
static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
	packet->callback(packet, card, RCODE_CANCELLED);
}

static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
	packet->callback(packet, card, RCODE_CANCELLED);
}

static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	return -ENOENT;
}

static int dummy_enable_phys_dma(struct fw_card *card,
				 int node_id, int generation)
{
	return -ENODEV;
}

static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
				int type, int channel, size_t header_size)
{
	return ERR_PTR(-ENODEV);
}

/* Only reachable in a race with driver removal; see fw_card_read_cycle_time(). */
static u32 dummy_read_csr(struct fw_card *card, int csr_offset)
{
	return 0;
}

static void dummy_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
}

static int dummy_start_iso(struct fw_iso_context *ctx,
			   s32 cycle, u32 sync, u32 tags)
{
	return -ENODEV;
}

static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels)
{
	return -ENODEV;
}

static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
			   struct fw_iso_buffer *buffer, unsigned long payload)
{
	return -ENODEV;
}

static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
{
}

static int dummy_flush_iso_completions(struct fw_iso_context *ctx)
{
	return -ENODEV;
}

/* .free_iso_context and .stop_iso are copied from the real driver in
 * fw_core_remove_card() so pending iso contexts can still be shut down. */
static const struct fw_card_driver dummy_driver_template = {
	.read_phy_reg		= dummy_read_phy_reg,
	.update_phy_reg		= dummy_update_phy_reg,
	.send_request		= dummy_send_request,
	.send_response		= dummy_send_response,
	.cancel_packet		= dummy_cancel_packet,
	.enable_phys_dma	= dummy_enable_phys_dma,
	.read_csr		= dummy_read_csr,
	.write_csr		= dummy_write_csr,
	.allocate_iso_context	= dummy_allocate_iso_context,
	.start_iso		= dummy_start_iso,
	.set_iso_channels	= dummy_set_iso_channels,
	.queue_iso		= dummy_queue_iso,
	.flush_queue_iso	= dummy_flush_queue_iso,
	.flush_iso_completions	= dummy_flush_iso_completions,
};
/* kref release callback: the last reference is gone, wake up the waiter in
 * fw_core_remove_card(). */
void fw_card_release(struct kref *kref)
{
	struct fw_card *card = container_of(kref, struct fw_card, kref);

	complete(&card->done);
}
EXPORT_SYMBOL_GPL(fw_card_release);
/*
 * Tear down a card: detach it from the bus, swap in the dummy driver so all
 * further IO fails cleanly, destroy the topology, and wait until the last
 * card reference is dropped.  The statement order here is deliberate.
 */
void fw_core_remove_card(struct fw_card *card)
{
	struct fw_card_driver dummy_driver = dummy_driver_template;

	might_sleep();

	/* Clear PHY_LINK_ACTIVE and PHY_CONTENDER, then force a bus reset. */
	card->driver->update_phy_reg(card, 4,
				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
	fw_schedule_bus_reset(card, false, true);

	scoped_guard(mutex, &card_mutex)
		list_del_init(&card->link);

	/* Switch off most of the card driver interface. */
	dummy_driver.free_iso_context = card->driver->free_iso_context;
	dummy_driver.stop_iso = card->driver->stop_iso;
	card->driver = &dummy_driver;
	drain_workqueue(card->isoc_wq);

	scoped_guard(spinlock_irqsave, &card->lock)
		fw_destroy_nodes(card);

	/* Wait for all users, especially device workqueue jobs, to finish. */
	fw_card_put(card);
	wait_for_completion(&card->done);

	destroy_workqueue(card->isoc_wq);

	WARN_ON(!list_empty(&card->transaction_list));
}
EXPORT_SYMBOL(fw_core_remove_card);
/**
 * fw_card_read_cycle_time: read from Isochronous Cycle Timer Register of 1394 OHCI in MMIO region
 *			    for controller card.
 * @card: The instance of card for 1394 OHCI controller.
 * @cycle_time: The mutual reference to value of cycle time for the read operation.
 *
 * Read value from Isochronous Cycle Timer Register of 1394 OHCI in MMIO region for the given
 * controller card.  This function accesses the region without any lock primitives or IRQ mask.
 * When returning successfully, the content of @cycle_time argument has value aligned to host
 * endianness, formatted by CYCLE_TIME CSR Register of IEEE 1394 std.
 *
 * Context: Any context.
 * Return:
 * * 0 - Read successfully.
 * * -ENODEV - The controller is unavailable due to being removed or unbound.
 */
int fw_card_read_cycle_time(struct fw_card *card, u32 *cycle_time)
{
	if (card->driver->read_csr == dummy_read_csr)
		return -ENODEV;

	// It's possible to switch to dummy driver between the above and the below. This is the best
	// effort to return -ENODEV.
	*cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);

	return 0;
}
EXPORT_SYMBOL_GPL(fw_card_read_cycle_time);