core-transaction.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core IEEE1394 transaction logic
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>

#include "core.h"
#include "packet-header-definitions.h"
#include "phy-packet-definitions.h"
#include <trace/events/firewire.h>

#define HEADER_DESTINATION_IS_BROADCAST(header) \
	((async_header_get_destination(header) & 0x3f) == 0x3f)

/* returns 0 if the split timeout handler is already running */
static int try_cancel_split_timeout(struct fw_transaction *t)
{
	if (t->is_split_transaction)
		return del_timer(&t->split_timeout_timer);
	else
		return 1;
}
static int close_transaction(struct fw_transaction *transaction, struct fw_card *card, int rcode,
			     u32 response_tstamp)
{
	struct fw_transaction *t = NULL, *iter;

	scoped_guard(spinlock_irqsave, &card->lock) {
		list_for_each_entry(iter, &card->transaction_list, link) {
			if (iter == transaction) {
				if (try_cancel_split_timeout(iter)) {
					list_del_init(&iter->link);
					card->tlabel_mask &= ~(1ULL << iter->tlabel);
					t = iter;
				}
				break;
			}
		}
	}

	if (!t)
		return -ENOENT;

	if (!t->with_tstamp) {
		t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data);
	} else {
		t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp, NULL, 0,
					t->callback_data);
	}

	return 0;
}

/*
 * Only valid for transactions that are potentially pending (ie have
 * been sent).
 */
int fw_cancel_transaction(struct fw_card *card,
			  struct fw_transaction *transaction)
{
	u32 tstamp;

	/*
	 * Cancel the packet transmission if it's still queued. That
	 * will call the packet transmission callback which cancels
	 * the transaction.
	 */
	if (card->driver->cancel_packet(card, &transaction->packet) == 0)
		return 0;

	/*
	 * If the request packet has already been sent, we need to see
	 * if the transaction is still pending and remove it in that case.
	 */
	if (transaction->packet.ack == 0) {
		// The timestamp is reused since it was just read now.
		tstamp = transaction->packet.timestamp;
	} else {
		u32 curr_cycle_time = 0;

		(void)fw_card_read_cycle_time(card, &curr_cycle_time);
		tstamp = cycle_time_to_ohci_tstamp(curr_cycle_time);
	}

	return close_transaction(transaction, card, RCODE_CANCELLED, tstamp);
}
EXPORT_SYMBOL(fw_cancel_transaction);
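#if 0
/*
 * A minimal usage sketch, not part of this file: cancelling a request that
 * may still be pending. The surrounding driver context is hypothetical; only
 * fw_cancel_transaction() itself is defined above.
 */
static void example_cancel(struct fw_card *card, struct fw_transaction *t)
{
	/* If the transaction already completed, this simply returns -ENOENT. */
	if (fw_cancel_transaction(card, t) < 0)
		pr_debug("transaction already closed\n");
}
#endif /* 0 */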
static void split_transaction_timeout_callback(struct timer_list *timer)
{
	struct fw_transaction *t = from_timer(t, timer, split_timeout_timer);
	struct fw_card *card = t->card;

	scoped_guard(spinlock_irqsave, &card->lock) {
		if (list_empty(&t->link))
			return;
		list_del(&t->link);
		card->tlabel_mask &= ~(1ULL << t->tlabel);
	}

	if (!t->with_tstamp) {
		t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
	} else {
		t->callback.with_tstamp(card, RCODE_CANCELLED, t->packet.timestamp,
					t->split_timeout_cycle, NULL, 0, t->callback_data);
	}
}

static void start_split_transaction_timeout(struct fw_transaction *t,
					    struct fw_card *card)
{
	guard(spinlock_irqsave)(&card->lock);

	if (list_empty(&t->link) || WARN_ON(t->is_split_transaction))
		return;

	t->is_split_transaction = true;
	mod_timer(&t->split_timeout_timer,
		  jiffies + card->split_timeout_jiffies);
}
static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp);

static void transmit_complete_callback(struct fw_packet *packet,
				       struct fw_card *card, int status)
{
	struct fw_transaction *t =
		container_of(packet, struct fw_transaction, packet);

	trace_async_request_outbound_complete((uintptr_t)t, card->index, packet->generation,
					      packet->speed, status, packet->timestamp);

	switch (status) {
	case ACK_COMPLETE:
		close_transaction(t, card, RCODE_COMPLETE, packet->timestamp);
		break;
	case ACK_PENDING:
	{
		t->split_timeout_cycle =
			compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
		start_split_transaction_timeout(t, card);
		break;
	}
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:
		close_transaction(t, card, RCODE_BUSY, packet->timestamp);
		break;
	case ACK_DATA_ERROR:
		close_transaction(t, card, RCODE_DATA_ERROR, packet->timestamp);
		break;
	case ACK_TYPE_ERROR:
		close_transaction(t, card, RCODE_TYPE_ERROR, packet->timestamp);
		break;
	default:
		/*
		 * In this case the ack is really a juju-specific
		 * rcode, so just forward that to the callback.
		 */
		close_transaction(t, card, status, packet->timestamp);
		break;
	}
}
static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
			    int destination_id, int source_id, int generation, int speed,
			    unsigned long long offset, void *payload, size_t length)
{
	int ext_tcode;

	if (tcode == TCODE_STREAM_DATA) {
		// The destination_id argument should include the tag, channel, and sy fields,
		// as found in an isochronous packet header.
		packet->header[0] = destination_id;
		isoc_header_set_data_length(packet->header, length);
		isoc_header_set_tcode(packet->header, TCODE_STREAM_DATA);
		packet->header_length = 4;
		packet->payload = payload;
		packet->payload_length = length;
		goto common;
	}

	if (tcode > 0x10) {
		ext_tcode = tcode & ~0x10;
		tcode = TCODE_LOCK_REQUEST;
	} else
		ext_tcode = 0;

	async_header_set_retry(packet->header, RETRY_X);
	async_header_set_tlabel(packet->header, tlabel);
	async_header_set_tcode(packet->header, tcode);
	async_header_set_destination(packet->header, destination_id);
	async_header_set_source(packet->header, source_id);
	async_header_set_offset(packet->header, offset);

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		async_header_set_quadlet_data(packet->header, *(u32 *)payload);
		packet->header_length = 16;
		packet->payload_length = 0;
		break;
	case TCODE_LOCK_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		async_header_set_data_length(packet->header, length);
		async_header_set_extended_tcode(packet->header, ext_tcode);
		packet->header_length = 16;
		packet->payload = payload;
		packet->payload_length = length;
		break;
	case TCODE_READ_QUADLET_REQUEST:
		packet->header_length = 12;
		packet->payload_length = 0;
		break;
	case TCODE_READ_BLOCK_REQUEST:
		async_header_set_data_length(packet->header, length);
		async_header_set_extended_tcode(packet->header, ext_tcode);
		packet->header_length = 16;
		packet->payload_length = 0;
		break;
	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}
 common:
	packet->speed = speed;
	packet->generation = generation;
	packet->ack = 0;
	packet->payload_mapped = false;
}
static int allocate_tlabel(struct fw_card *card)
{
	int tlabel;

	tlabel = card->current_tlabel;
	while (card->tlabel_mask & (1ULL << tlabel)) {
		tlabel = (tlabel + 1) & 0x3f;
		if (tlabel == card->current_tlabel)
			return -EBUSY;
	}

	card->current_tlabel = (tlabel + 1) & 0x3f;
	card->tlabel_mask |= 1ULL << tlabel;

	return tlabel;
}
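/*
 * Worked example of the allocation above (illustrative numbers, not from this
 * file): with current_tlabel == 5 and tlabel_mask having bits 5 and 6 set,
 * the loop probes 5 and 6 (both busy) and settles on 7; current_tlabel then
 * advances to 8 and bit 7 is set in the 64-bit mask. Only when all 64 labels
 * are in flight does the probe wrap back to 5 and return -EBUSY.
 */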
/**
 * __fw_send_request() - submit a request packet for transmission and generate a callback for the
 *			 response subaction, with or without time stamp.
 * @card: interface to send the request at
 * @t: transaction instance to which the request belongs
 * @tcode: transaction code
 * @destination_id: destination node ID, consisting of bus_ID and phy_ID
 * @generation: bus generation in which request and response are valid
 * @speed: transmission speed
 * @offset: 48bit wide offset into destination's address space
 * @payload: data payload for the request subaction
 * @length: length of the payload, in bytes
 * @callback: union of two functions, depending on whether the response subaction should carry a
 *	      time stamp or not
 * @with_tstamp: whether to receive a time stamp for the response subaction
 * @callback_data: data to be passed to the transaction completion callback
 *
 * Submit a request packet into the asynchronous request transmission queue.
 * Can be called from atomic context. If you prefer a blocking API, use
 * fw_run_transaction() in a context that can sleep.
 *
 * In case of lock requests, specify one of the firewire-core specific %TCODE_
 * constants instead of %TCODE_LOCK_REQUEST in @tcode.
 *
 * Make sure that the value in @destination_id is not older than the one in
 * @generation. Otherwise the request is in danger of being sent to the wrong node.
 *
 * In case of asynchronous stream packets i.e. %TCODE_STREAM_DATA, the caller
 * needs to synthesize @destination_id with fw_stream_packet_destination_id().
 * It will then contain tag, channel, and sy data instead of a node ID.
 *
 * The payload buffer at @payload is going to be DMA-mapped except in case of
 * @length <= 8 or of local (loopback) requests. Hence make sure that the
 * buffer complies with the restrictions of the streaming DMA mapping API.
 * @payload must not be freed before the @callback is called.
 *
 * In case of request types without payload, @payload is NULL and @length is 0.
 *
 * After the transaction is completed successfully or unsuccessfully, the
 * @callback will be called. Among its parameters is the response code which
 * is either one of the rcodes per IEEE 1394 or, in case of internal errors,
 * the firewire-core specific %RCODE_SEND_ERROR. The other firewire-core
 * specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION,
 * %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request
 * generation, or missing ACK respectively.
 *
 * Note some timing corner cases: fw_send_request() may complete much earlier
 * than when the request packet actually hits the wire. On the other hand,
 * transaction completion and hence execution of @callback may happen even
 * before fw_send_request() returns.
 */
void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
		       int destination_id, int generation, int speed, unsigned long long offset,
		       void *payload, size_t length, union fw_transaction_callback callback,
		       bool with_tstamp, void *callback_data)
{
	unsigned long flags;
	int tlabel;

	/*
	 * Allocate tlabel from the bitmap and put the transaction on
	 * the list while holding the card spinlock.
	 */
	spin_lock_irqsave(&card->lock, flags);

	tlabel = allocate_tlabel(card);
	if (tlabel < 0) {
		spin_unlock_irqrestore(&card->lock, flags);
		if (!with_tstamp) {
			callback.without_tstamp(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
		} else {
			// Timestamping on behalf of hardware.
			u32 curr_cycle_time = 0;
			u32 tstamp;

			(void)fw_card_read_cycle_time(card, &curr_cycle_time);
			tstamp = cycle_time_to_ohci_tstamp(curr_cycle_time);

			callback.with_tstamp(card, RCODE_SEND_ERROR, tstamp, tstamp, NULL, 0,
					     callback_data);
		}
		return;
	}

	t->node_id = destination_id;
	t->tlabel = tlabel;
	t->card = card;
	t->is_split_transaction = false;
	timer_setup(&t->split_timeout_timer, split_transaction_timeout_callback, 0);
	t->callback = callback;
	t->with_tstamp = with_tstamp;
	t->callback_data = callback_data;

	fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id, generation,
			speed, offset, payload, length);
	t->packet.callback = transmit_complete_callback;

	list_add_tail(&t->link, &card->transaction_list);

	spin_unlock_irqrestore(&card->lock, flags);

	trace_async_request_outbound_initiate((uintptr_t)t, card->index, generation, speed,
					      t->packet.header, payload,
					      tcode_is_read_request(tcode) ? 0 : length / 4);

	card->driver->send_request(card, &t->packet);
}
EXPORT_SYMBOL_GPL(__fw_send_request);
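#if 0
/*
 * A minimal usage sketch, not part of this file, assuming the non-timestamped
 * fw_send_request() wrapper from <linux/firewire.h>. The device pointer,
 * target register, and completion routine here are hypothetical examples.
 */
static void example_complete(struct fw_card *card, int rcode,
			     void *payload, size_t length, void *data)
{
	if (rcode != RCODE_COMPLETE)
		pr_debug("request failed: %s\n", fw_rcode_string(rcode));
}

static void example_quadlet_write(struct fw_card *card, struct fw_device *dev,
				  struct fw_transaction *t, __be32 *quadlet)
{
	fw_send_request(card, t, TCODE_WRITE_QUADLET_REQUEST,
			dev->node_id, dev->generation, dev->max_speed,
			CSR_REGISTER_BASE + CSR_CYCLE_TIME,
			quadlet, sizeof(*quadlet), example_complete, NULL);
}
#endif /* 0 */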
struct transaction_callback_data {
	struct completion done;
	void *payload;
	int rcode;
};

static void transaction_callback(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct transaction_callback_data *d = data;

	if (rcode == RCODE_COMPLETE)
		memcpy(d->payload, payload, length);
	d->rcode = rcode;
	complete(&d->done);
}
/**
 * fw_run_transaction() - send request and sleep until transaction is completed
 * @card: card interface for this request
 * @tcode: transaction code
 * @destination_id: destination node ID, consisting of bus_ID and phy_ID
 * @generation: bus generation in which request and response are valid
 * @speed: transmission speed
 * @offset: 48bit wide offset into destination's address space
 * @payload: data payload for the request subaction
 * @length: length of the payload, in bytes
 *
 * Returns the RCODE. See fw_send_request() for parameter documentation.
 * Unlike fw_send_request(), @payload points to the payload of the request and/or
 * to the payload of the response. DMA mapping restrictions apply to outbound
 * request payloads of >= 8 bytes but not to inbound response payloads.
 */
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
		       void *payload, size_t length)
{
	struct transaction_callback_data d;
	struct fw_transaction t;

	timer_setup_on_stack(&t.split_timeout_timer, NULL, 0);
	init_completion(&d.done);
	d.payload = payload;
	fw_send_request(card, &t, tcode, destination_id, generation, speed,
			offset, payload, length, transaction_callback, &d);
	wait_for_completion(&d.done);
	destroy_timer_on_stack(&t.split_timeout_timer);

	return d.rcode;
}
EXPORT_SYMBOL(fw_run_transaction);
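#if 0
/*
 * A minimal usage sketch, not part of this file: a blocking quadlet read from
 * a node's configuration ROM. Must run in a context that can sleep; the
 * device pointer and error mapping are hypothetical.
 */
static int example_read_rom_quadlet(struct fw_card *card, struct fw_device *dev,
				    __be32 *quadlet)
{
	int rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
				       dev->node_id, dev->generation, dev->max_speed,
				       CSR_REGISTER_BASE + CSR_CONFIG_ROM,
				       quadlet, sizeof(*quadlet));

	return rcode == RCODE_COMPLETE ? 0 : -EIO;
}
#endif /* 0 */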
static DEFINE_MUTEX(phy_config_mutex);
static DECLARE_COMPLETION(phy_config_done);

static void transmit_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	trace_async_phy_outbound_complete((uintptr_t)packet, card->index, packet->generation,
					  status, packet->timestamp);
	complete(&phy_config_done);
}

static struct fw_packet phy_config_packet = {
	.header_length	= 12,
	.payload_length	= 0,
	.speed		= SCODE_100,
	.callback	= transmit_phy_packet_callback,
};

void fw_send_phy_config(struct fw_card *card,
			int node_id, int generation, int gap_count)
{
	long timeout = DIV_ROUND_UP(HZ, 10);
	u32 data = 0;

	phy_packet_set_packet_identifier(&data, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG);

	if (node_id != FW_PHY_CONFIG_NO_NODE_ID) {
		phy_packet_phy_config_set_root_id(&data, node_id);
		phy_packet_phy_config_set_force_root_node(&data, true);
	}

	if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) {
		gap_count = card->driver->read_phy_reg(card, 1);
		if (gap_count < 0)
			return;

		gap_count &= 63;
		if (gap_count == 63)
			return;
	}
	phy_packet_phy_config_set_gap_count(&data, gap_count);
	phy_packet_phy_config_set_gap_count_optimization(&data, true);

	guard(mutex)(&phy_config_mutex);

	async_header_set_tcode(phy_config_packet.header, TCODE_LINK_INTERNAL);
	phy_config_packet.header[1] = data;
	phy_config_packet.header[2] = ~data;
	phy_config_packet.generation = generation;
	reinit_completion(&phy_config_done);

	trace_async_phy_outbound_initiate((uintptr_t)&phy_config_packet, card->index,
					  phy_config_packet.generation, phy_config_packet.header[1],
					  phy_config_packet.header[2]);

	card->driver->send_request(card, &phy_config_packet);
	wait_for_completion_timeout(&phy_config_done, timeout);
}
static struct fw_address_handler *lookup_overlapping_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (handler->offset < offset + length &&
		    offset < handler->offset + handler->length)
			return handler;
	}

	return NULL;
}

static bool is_enclosing_handler(struct fw_address_handler *handler,
				 unsigned long long offset, size_t length)
{
	return handler->offset <= offset &&
	       offset + length <= handler->offset + handler->length;
}

static struct fw_address_handler *lookup_enclosing_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (is_enclosing_handler(handler, offset, length))
			return handler;
	}

	return NULL;
}

static DEFINE_SPINLOCK(address_handler_list_lock);
static LIST_HEAD(address_handler_list);

const struct fw_address_region fw_high_memory_region =
	{ .start = FW_MAX_PHYSICAL_RANGE, .end = 0xffffe0000000ULL, };
EXPORT_SYMBOL(fw_high_memory_region);

static const struct fw_address_region low_memory_region =
	{ .start = 0x000000000000ULL, .end = FW_MAX_PHYSICAL_RANGE, };

#if 0
const struct fw_address_region fw_private_region =
	{ .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
const struct fw_address_region fw_csr_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END, };
const struct fw_address_region fw_unit_space_region =
	{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif /* 0 */
/**
 * fw_core_add_address_handler() - register for incoming requests
 * @handler: callback
 * @region: region in the IEEE 1212 node space address range
 *
 * region->start, ->end, and handler->length have to be quadlet-aligned.
 *
 * When a request is received that falls within the specified address range,
 * the specified callback is invoked. The parameters passed to the callback
 * give the details of the particular request.
 *
 * To be called in process context.
 * Return value: 0 on success, non-zero otherwise.
 *
 * The start offset of the handler's address region is determined by
 * fw_core_add_address_handler() and is returned in handler->offset.
 *
 * Address allocations are exclusive, except for the FCP registers.
 */
int fw_core_add_address_handler(struct fw_address_handler *handler,
				const struct fw_address_region *region)
{
	struct fw_address_handler *other;
	int ret = -EBUSY;

	if (region->start & 0xffff000000000003ULL ||
	    region->start >= region->end ||
	    region->end   > 0x0001000000000000ULL ||
	    handler->length & 3 ||
	    handler->length == 0)
		return -EINVAL;

	guard(spinlock)(&address_handler_list_lock);

	handler->offset = region->start;
	while (handler->offset + handler->length <= region->end) {
		if (is_in_fcp_region(handler->offset, handler->length))
			other = NULL;
		else
			other = lookup_overlapping_address_handler
					(&address_handler_list,
					 handler->offset, handler->length);
		if (other != NULL) {
			handler->offset += other->length;
		} else {
			list_add_tail_rcu(&handler->link, &address_handler_list);
			ret = 0;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL(fw_core_add_address_handler);
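#if 0
/*
 * A minimal usage sketch, not part of this file: registering a handler for a
 * quadlet-aligned region in high memory. The callback and handler names are
 * hypothetical; the handler must later be removed again with
 * fw_core_remove_address_handler() before its owner goes away.
 */
static void example_callback(struct fw_card *card, struct fw_request *request,
			     int tcode, int destination, int source, int generation,
			     unsigned long long offset, void *payload, size_t length,
			     void *callback_data)
{
	fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler example_handler = {
	.length			= 0x100,
	.address_callback	= example_callback,
};

static int example_register(void)
{
	/* On success, the allocated start offset is in example_handler.offset. */
	return fw_core_add_address_handler(&example_handler, &fw_high_memory_region);
}
#endif /* 0 */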
/**
 * fw_core_remove_address_handler() - unregister an address handler
 * @handler: callback
 *
 * To be called in process context.
 *
 * When fw_core_remove_address_handler() returns, @handler->callback() is
 * guaranteed to not run on any CPU anymore.
 */
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
	scoped_guard(spinlock, &address_handler_list_lock)
		list_del_rcu(&handler->link);

	synchronize_rcu();
}
EXPORT_SYMBOL(fw_core_remove_address_handler);
struct fw_request {
	struct kref kref;
	struct fw_packet response;
	u32 request_header[ASYNC_HEADER_QUADLET_COUNT];
	int ack;
	u32 timestamp;
	u32 length;
	u32 data[];
};

void fw_request_get(struct fw_request *request)
{
	kref_get(&request->kref);
}

static void release_request(struct kref *kref)
{
	struct fw_request *request = container_of(kref, struct fw_request, kref);

	kfree(request);
}

void fw_request_put(struct fw_request *request)
{
	kref_put(&request->kref, release_request);
}

static void free_response_callback(struct fw_packet *packet,
				   struct fw_card *card, int status)
{
	struct fw_request *request = container_of(packet, struct fw_request, response);

	trace_async_response_outbound_complete((uintptr_t)request, card->index, packet->generation,
					       packet->speed, status, packet->timestamp);

	// Decrease the reference count since the response is no longer in flight.
	fw_request_put(request);

	// Decrease the reference count to release the object.
	fw_request_put(request);
}
int fw_get_response_length(struct fw_request *r)
{
	int tcode, ext_tcode, data_length;

	tcode = async_header_get_tcode(r->request_header);

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		return 0;
	case TCODE_READ_QUADLET_REQUEST:
		return 4;
	case TCODE_READ_BLOCK_REQUEST:
		data_length = async_header_get_data_length(r->request_header);
		return data_length;
	case TCODE_LOCK_REQUEST:
		ext_tcode = async_header_get_extended_tcode(r->request_header);
		data_length = async_header_get_data_length(r->request_header);
		switch (ext_tcode) {
		case EXTCODE_FETCH_ADD:
		case EXTCODE_LITTLE_ADD:
			return data_length;
		default:
			return data_length / 2;
		}
	default:
		WARN(1, "wrong tcode %d\n", tcode);
		return 0;
	}
}
void fw_fill_response(struct fw_packet *response, u32 *request_header,
		      int rcode, void *payload, size_t length)
{
	int tcode, tlabel, extended_tcode, source, destination;

	tcode = async_header_get_tcode(request_header);
	tlabel = async_header_get_tlabel(request_header);
	source = async_header_get_destination(request_header);	// Exchange.
	destination = async_header_get_source(request_header);	// Exchange.
	extended_tcode = async_header_get_extended_tcode(request_header);

	async_header_set_retry(response->header, RETRY_1);
	async_header_set_tlabel(response->header, tlabel);
	async_header_set_destination(response->header, destination);
	async_header_set_source(response->header, source);
	async_header_set_rcode(response->header, rcode);
	response->header[2] = 0;	// The field is reserved.

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		async_header_set_tcode(response->header, TCODE_WRITE_RESPONSE);
		response->header_length = 12;
		response->payload_length = 0;
		break;
	case TCODE_READ_QUADLET_REQUEST:
		async_header_set_tcode(response->header, TCODE_READ_QUADLET_RESPONSE);
		if (payload != NULL)
			async_header_set_quadlet_data(response->header, *(u32 *)payload);
		else
			async_header_set_quadlet_data(response->header, 0);
		response->header_length = 16;
		response->payload_length = 0;
		break;
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		async_header_set_tcode(response->header, tcode + 2);
		async_header_set_data_length(response->header, length);
		async_header_set_extended_tcode(response->header, extended_tcode);
		response->header_length = 16;
		response->payload = payload;
		response->payload_length = length;
		break;
	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}

	response->payload_mapped = false;
}
EXPORT_SYMBOL(fw_fill_response);
static u32 compute_split_timeout_timestamp(struct fw_card *card,
					   u32 request_timestamp)
{
	unsigned int cycles;
	u32 timestamp;

	cycles = card->split_timeout_cycles;
	cycles += request_timestamp & 0x1fff;

	timestamp = request_timestamp & ~0x1fff;
	timestamp += (cycles / 8000) << 13;
	timestamp |= cycles % 8000;

	return timestamp;
}
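/*
 * Worked example of the computation above (illustrative numbers): the
 * timestamp packs a seconds field above a 13-bit cycle count, and a second
 * holds 8000 cycles. With split_timeout_cycles == 800 (100 ms) and a request
 * timestamp of seconds=2, cycle=7500, the sum 800 + 7500 = 8300 cycles
 * carries one second: the result is seconds=3, cycle=300.
 */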
static struct fw_request *allocate_request(struct fw_card *card,
					   struct fw_packet *p)
{
	struct fw_request *request;
	u32 *data, length;
	int request_tcode;

	request_tcode = async_header_get_tcode(p->header);
	switch (request_tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		data = &p->header[3];
		length = 4;
		break;
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		data = p->payload;
		length = async_header_get_data_length(p->header);
		break;
	case TCODE_READ_QUADLET_REQUEST:
		data = NULL;
		length = 4;
		break;
	case TCODE_READ_BLOCK_REQUEST:
		data = NULL;
		length = async_header_get_data_length(p->header);
		break;
	default:
		fw_notice(card, "ERROR - corrupt request received - %08x %08x %08x\n",
			  p->header[0], p->header[1], p->header[2]);
		return NULL;
	}

	request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
	if (request == NULL)
		return NULL;
	kref_init(&request->kref);

	request->response.speed = p->speed;
	request->response.timestamp =
			compute_split_timeout_timestamp(card, p->timestamp);
	request->response.generation = p->generation;
	request->response.ack = 0;
	request->response.callback = free_response_callback;
	request->ack = p->ack;
	request->timestamp = p->timestamp;
	request->length = length;
	if (data)
		memcpy(request->data, data, length);

	memcpy(request->request_header, p->header, sizeof(p->header));

	return request;
}
/**
 * fw_send_response() - send response packet for asynchronous transaction
 * @card: interface to send the response at
 * @request: firewire request data for the transaction
 * @rcode: response code to send
 *
 * Submit a response packet into the asynchronous response transmission queue. The @request
 * is going to be released when the transmission successfully finishes later.
 */
void fw_send_response(struct fw_card *card,
		      struct fw_request *request, int rcode)
{
	u32 *data = NULL;
	unsigned int data_length = 0;

	/* unified transaction or broadcast transaction: don't respond */
	if (request->ack != ACK_PENDING ||
	    HEADER_DESTINATION_IS_BROADCAST(request->request_header)) {
		fw_request_put(request);
		return;
	}

	if (rcode == RCODE_COMPLETE) {
		data = request->data;
		data_length = fw_get_response_length(request);
	}

	fw_fill_response(&request->response, request->request_header, rcode, data, data_length);

	// Increase the reference count so that the object is kept while in flight.
	fw_request_get(request);

	trace_async_response_outbound_initiate((uintptr_t)request, card->index,
					       request->response.generation, request->response.speed,
					       request->response.header, data,
					       data ? data_length / 4 : 0);

	card->driver->send_response(card, &request->response);
}
EXPORT_SYMBOL(fw_send_response);
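#if 0
/*
 * A minimal usage sketch, not part of this file: a read-only address handler
 * that serves one quadlet and rejects everything else. Each request passed to
 * an address_callback must be answered with exactly one fw_send_response()
 * call; the backing variable wired through callback_data is hypothetical.
 */
static void example_read_only_callback(struct fw_card *card, struct fw_request *request,
				       int tcode, int destination, int source, int generation,
				       unsigned long long offset, void *payload, size_t length,
				       void *callback_data)
{
	__be32 *value = callback_data;

	if (tcode != TCODE_READ_QUADLET_REQUEST) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	/* For read requests, @payload is the buffer the response is served from. */
	memcpy(payload, value, sizeof(*value));
	fw_send_response(card, request, RCODE_COMPLETE);
}
#endif /* 0 */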
/**
 * fw_get_request_speed() - returns speed at which the @request was received
 * @request: firewire request data
 */
int fw_get_request_speed(struct fw_request *request)
{
	return request->response.speed;
}
EXPORT_SYMBOL(fw_get_request_speed);
/**
 * fw_request_get_timestamp() - get timestamp of the request
 * @request: The opaque pointer to request structure.
 *
 * Get the timestamp at which the 1394 OHCI controller received the asynchronous request
 * subaction. The timestamp consists of the low-order 3 bits of the second field and the full
 * 13 bits of the cycle count field of the isochronous cycle timer register.
 *
 * Returns: timestamp of the request.
 */
u32 fw_request_get_timestamp(const struct fw_request *request)
{
	return request->timestamp;
}
EXPORT_SYMBOL_GPL(fw_request_get_timestamp);
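#if 0
/*
 * A minimal decoding sketch, not part of this file: splitting the 16-bit
 * timestamp into the seconds and cycle components described above. The field
 * positions assumed here follow that description (3 bits of seconds above a
 * 13-bit cycle count).
 */
static void example_decode_tstamp(const struct fw_request *request)
{
	u32 tstamp = fw_request_get_timestamp(request);
	unsigned int sec = (tstamp >> 13) & 0x7;	/* low 3 bits of second field */
	unsigned int cycle = tstamp & 0x1fff;		/* cycle count, 0..7999 */

	pr_debug("request at %u s + %u cycles\n", sec, cycle);
}
#endif /* 0 */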
static void handle_exclusive_region_request(struct fw_card *card,
					    struct fw_packet *p,
					    struct fw_request *request,
					    unsigned long long offset)
{
	struct fw_address_handler *handler;
	int tcode, destination, source;

	destination = async_header_get_destination(p->header);
	source = async_header_get_source(p->header);
	tcode = async_header_get_tcode(p->header);
	if (tcode == TCODE_LOCK_REQUEST)
		tcode = 0x10 + async_header_get_extended_tcode(p->header);

	scoped_guard(rcu) {
		handler = lookup_enclosing_address_handler(&address_handler_list, offset,
							   request->length);
		if (handler)
			handler->address_callback(card, request, tcode, destination, source,
						  p->generation, offset, request->data,
						  request->length, handler->callback_data);
	}

	if (!handler)
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
}

static void handle_fcp_region_request(struct fw_card *card,
				      struct fw_packet *p,
				      struct fw_request *request,
				      unsigned long long offset)
{
	struct fw_address_handler *handler;
	int tcode, destination, source;

	if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
	     offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
	    request->length > 0x200) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
		return;
	}

	tcode = async_header_get_tcode(p->header);
	destination = async_header_get_destination(p->header);
	source = async_header_get_source(p->header);

	if (tcode != TCODE_WRITE_QUADLET_REQUEST &&
	    tcode != TCODE_WRITE_BLOCK_REQUEST) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	scoped_guard(rcu) {
		list_for_each_entry_rcu(handler, &address_handler_list, link) {
			if (is_enclosing_handler(handler, offset, request->length))
				handler->address_callback(card, request, tcode, destination, source,
							  p->generation, offset, request->data,
							  request->length, handler->callback_data);
		}
	}

	fw_send_response(card, request, RCODE_COMPLETE);
}
void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
{
	struct fw_request *request;
	unsigned long long offset;
	unsigned int tcode;

	if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
		return;

	tcode = async_header_get_tcode(p->header);
	if (tcode_is_link_internal(tcode)) {
		trace_async_phy_inbound((uintptr_t)p, card->index, p->generation, p->ack,
					p->timestamp, p->header[1], p->header[2]);
		fw_cdev_handle_phy_packet(card, p);
		return;
	}

	request = allocate_request(card, p);
	if (request == NULL) {
		/* FIXME: send statically allocated busy packet. */
		return;
	}

	trace_async_request_inbound((uintptr_t)request, card->index, p->generation, p->speed,
				    p->ack, p->timestamp, p->header, request->data,
				    tcode_is_read_request(tcode) ? 0 : request->length / 4);

	offset = async_header_get_offset(p->header);

	if (!is_in_fcp_region(offset, request->length))
		handle_exclusive_region_request(card, p, request, offset);
	else
		handle_fcp_region_request(card, p, request, offset);
}
EXPORT_SYMBOL(fw_core_handle_request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
	struct fw_transaction *t = NULL, *iter;
	u32 *data;
	size_t data_length;
	int tcode, tlabel, source, rcode;

	tcode = async_header_get_tcode(p->header);
	tlabel = async_header_get_tlabel(p->header);
	source = async_header_get_source(p->header);
	rcode = async_header_get_rcode(p->header);

	// FIXME: sanity check packet: is the length correct, do the tcode and
	// addresses match the transaction request queried later?
	//
	// For the tracepoints event, let us decode the header here against the concern.

	switch (tcode) {
	case TCODE_READ_QUADLET_RESPONSE:
		data = (u32 *) &p->header[3];
		data_length = 4;
		break;
	case TCODE_WRITE_RESPONSE:
		data = NULL;
		data_length = 0;
		break;
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_RESPONSE:
		data = p->payload;
		data_length = async_header_get_data_length(p->header);
		break;
	default:
		/* Should never happen, this is just to shut up gcc. */
		data = NULL;
		data_length = 0;
		break;
	}

	scoped_guard(spinlock_irqsave, &card->lock) {
		list_for_each_entry(iter, &card->transaction_list, link) {
			if (iter->node_id == source && iter->tlabel == tlabel) {
				if (try_cancel_split_timeout(iter)) {
					list_del_init(&iter->link);
					card->tlabel_mask &= ~(1ULL << iter->tlabel);
					t = iter;
				}
				break;
			}
		}
	}

	trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack,
				     p->timestamp, p->header, data, data_length / 4);

	if (!t) {
		fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
			  source, tlabel);
		return;
	}

	/*
	 * The response handler may be executed while the request handler
	 * is still pending. Cancel the request handler.
	 */
	card->driver->cancel_packet(card, &t->packet);

	if (!t->with_tstamp) {
		t->callback.without_tstamp(card, rcode, data, data_length, t->callback_data);
	} else {
		t->callback.with_tstamp(card, rcode, t->packet.timestamp, p->timestamp, data,
					data_length, t->callback_data);
	}
}
EXPORT_SYMBOL(fw_core_handle_response);
/**
 * fw_rcode_string - convert a firewire result code to an error description
 * @rcode: the result code
 */
const char *fw_rcode_string(int rcode)
{
	static const char *const names[] = {
		[RCODE_COMPLETE]       = "no error",
		[RCODE_CONFLICT_ERROR] = "conflict error",
		[RCODE_DATA_ERROR]     = "data error",
		[RCODE_TYPE_ERROR]     = "type error",
		[RCODE_ADDRESS_ERROR]  = "address error",
		[RCODE_SEND_ERROR]     = "send error",
		[RCODE_CANCELLED]      = "timeout",
		[RCODE_BUSY]           = "busy",
		[RCODE_GENERATION]     = "bus reset",
		[RCODE_NO_ACK]         = "no ack",
	};

	if ((unsigned int)rcode < ARRAY_SIZE(names) && names[rcode])
		return names[rcode];
	else
		return "unknown";
}
EXPORT_SYMBOL(fw_rcode_string);
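/*
 * Example behavior of the lookup above (illustrative): fw_rcode_string(RCODE_BUSY)
 * yields "busy", while a value with no designated initializer (a gap in the
 * array, or anything at or beyond ARRAY_SIZE(names)) falls through to "unknown".
 */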
static const struct fw_address_region topology_map_region =
	{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
	  .end   = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };

static void handle_topology_map(struct fw_card *card, struct fw_request *request,
				int tcode, int destination, int source, int generation,
				unsigned long long offset, void *payload, size_t length,
				void *callback_data)
{
	int start;

	if (!tcode_is_read_request(tcode)) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	if ((offset & 3) > 0 || (length & 3) > 0) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
		return;
	}

	start = (offset - topology_map_region.start) / 4;
	memcpy(payload, &card->topology_map[start], length);

	fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler topology_map = {
	.length			= 0x400,
	.address_callback	= handle_topology_map,
};
static const struct fw_address_region registers_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };

static void update_split_timeout(struct fw_card *card)
{
	unsigned int cycles;

	cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);

	/* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
	cycles = clamp(cycles, 800u, 3u * 8000u);

	card->split_timeout_cycles = cycles;
	card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
}
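/*
 * Worked example of the conversion above (illustrative numbers): the HI
 * register counts seconds and the top 13 bits of the LO register count
 * 1/8000ths of a second. With split_timeout_hi == 0 and split_timeout_lo ==
 * 0x80000000, cycles is 0 * 8000 + (0x80000000 >> 19) = 4096, which already
 * lies inside the clamped range [800, 24000] and yields a timeout of
 * 4096/8000 = 0.512 s.
 */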
static void handle_registers(struct fw_card *card, struct fw_request *request,
			     int tcode, int destination, int source, int generation,
			     unsigned long long offset, void *payload, size_t length,
			     void *callback_data)
{
	int reg = offset & ~CSR_REGISTER_BASE;
	__be32 *data = payload;
	int rcode = RCODE_COMPLETE;

	switch (reg) {
	case CSR_PRIORITY_BUDGET:
		if (!card->priority_budget_implemented) {
			rcode = RCODE_ADDRESS_ERROR;
			break;
		}
		fallthrough;

	case CSR_NODE_IDS:
		/*
		 * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8
		 * and 9.6, but interoperable with IEEE 1394.1-2004 bridges
		 */
		fallthrough;

	case CSR_STATE_CLEAR:
	case CSR_STATE_SET:
	case CSR_CYCLE_TIME:
	case CSR_BUS_TIME:
	case CSR_BUSY_TIMEOUT:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->driver->read_csr(card, reg));
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, reg, be32_to_cpu(*data));
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_RESET_START:
		if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, CSR_STATE_CLEAR,
						CSR_STATE_BIT_ABDICATE);
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_SPLIT_TIMEOUT_HI:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout_hi);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			guard(spinlock_irqsave)(&card->lock);

			card->split_timeout_hi = be32_to_cpu(*data) & 7;
			update_split_timeout(card);
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_SPLIT_TIMEOUT_LO:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout_lo);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			guard(spinlock_irqsave)(&card->lock);

			card->split_timeout_lo = be32_to_cpu(*data) & 0xfff80000;
			update_split_timeout(card);
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_MAINT_UTILITY:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = card->maint_utility_register;
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->maint_utility_register = *data;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BROADCAST_CHANNEL:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->broadcast_channel);
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->broadcast_channel =
			    (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) |
			    BROADCAST_CHANNEL_INITIAL;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		/*
		 * FIXME: these are handled by the OHCI hardware and
		 * the stack never sees these requests. If we add
		 * support for a new type of controller that doesn't
		 * handle this in hardware we need to deal with these
		 * transactions.
		 */
		BUG();
		break;

	default:
		rcode = RCODE_ADDRESS_ERROR;
		break;
	}

	fw_send_response(card, request, rcode);
}

static struct fw_address_handler registers = {
	.length			= 0x400,
	.address_callback	= handle_registers,
};
static void handle_low_memory(struct fw_card *card, struct fw_request *request,
			      int tcode, int destination, int source, int generation,
			      unsigned long long offset, void *payload, size_t length,
			      void *callback_data)
{
	/*
	 * This catches requests not handled by the physical DMA unit,
	 * i.e., wrong transaction types or unauthorized source nodes.
	 */
	fw_send_response(card, request, RCODE_TYPE_ERROR);
}

static struct fw_address_handler low_memory = {
	.length			= FW_MAX_PHYSICAL_RANGE,
	.address_callback	= handle_low_memory,
};
MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
MODULE_LICENSE("GPL");

static const u32 vendor_textual_descriptor[] = {
	/* textual descriptor leaf () */
	0x00060000,
	0x00000000,
	0x00000000,
	0x4c696e75,	/* L i n u */
	0x78204669,	/* x   F i */
	0x72657769,	/* r e w i */
	0x72650000,	/* r e     */
};

static const u32 model_textual_descriptor[] = {
	/* model descriptor leaf () */
	0x00030000,
	0x00000000,
	0x00000000,
	0x4a756a75,	/* J u j u */
};

static struct fw_descriptor vendor_id_descriptor = {
	.length = ARRAY_SIZE(vendor_textual_descriptor),
	.immediate = 0x03001f11,
	.key = 0x81000000,
	.data = vendor_textual_descriptor,
};

static struct fw_descriptor model_id_descriptor = {
	.length = ARRAY_SIZE(model_textual_descriptor),
	.immediate = 0x17023901,
	.key = 0x81000000,
	.data = model_textual_descriptor,
};
static int __init fw_core_init(void)
{
	int ret;

	fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM, 0);
	if (!fw_workqueue)
		return -ENOMEM;

	ret = bus_register(&fw_bus_type);
	if (ret < 0) {
		destroy_workqueue(fw_workqueue);
		return ret;
	}

	fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
	if (fw_cdev_major < 0) {
		bus_unregister(&fw_bus_type);
		destroy_workqueue(fw_workqueue);
		return fw_cdev_major;
	}

	fw_core_add_address_handler(&topology_map, &topology_map_region);
	fw_core_add_address_handler(&registers, &registers_region);
	fw_core_add_address_handler(&low_memory, &low_memory_region);
	fw_core_add_descriptor(&vendor_id_descriptor);
	fw_core_add_descriptor(&model_id_descriptor);

	return 0;
}

static void __exit fw_core_cleanup(void)
{
	unregister_chrdev(fw_cdev_major, "firewire");
	bus_unregister(&fw_bus_type);
	destroy_workqueue(fw_workqueue);
	xa_destroy(&fw_device_xa);
}

module_init(fw_core_init);
module_exit(fw_core_cleanup);