/* drivers/scsi/libsas/sas_init.c */
/*
 * Serial Attached SCSI (SAS) Transport Layer initialization
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 *
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <scsi/sas_ata.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>

#include "sas_internal.h"
#include "../scsi_sas_internal.h"
/* Slab caches backing sas_task and asd_sas_event allocations;
 * created in sas_class_init(), destroyed in sas_class_exit().
 */
static struct kmem_cache *sas_task_cache;
static struct kmem_cache *sas_event_cache;
  39. struct sas_task *sas_alloc_task(gfp_t flags)
  40. {
  41. struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);
  42. if (task) {
  43. spin_lock_init(&task->task_state_lock);
  44. task->task_state_flags = SAS_TASK_STATE_PENDING;
  45. }
  46. return task;
  47. }
  48. EXPORT_SYMBOL_GPL(sas_alloc_task);
  49. struct sas_task *sas_alloc_slow_task(gfp_t flags)
  50. {
  51. struct sas_task *task = sas_alloc_task(flags);
  52. struct sas_task_slow *slow = kmalloc(sizeof(*slow), flags);
  53. if (!task || !slow) {
  54. if (task)
  55. kmem_cache_free(sas_task_cache, task);
  56. kfree(slow);
  57. return NULL;
  58. }
  59. task->slow_task = slow;
  60. slow->task = task;
  61. timer_setup(&slow->timer, NULL, 0);
  62. init_completion(&slow->completion);
  63. return task;
  64. }
  65. EXPORT_SYMBOL_GPL(sas_alloc_slow_task);
  66. void sas_free_task(struct sas_task *task)
  67. {
  68. if (task) {
  69. kfree(task->slow_task);
  70. kmem_cache_free(sas_task_cache, task);
  71. }
  72. }
  73. EXPORT_SYMBOL_GPL(sas_free_task);
  74. /*------------ SAS addr hash -----------*/
  75. void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
  76. {
  77. const u32 poly = 0x00DB2777;
  78. u32 r = 0;
  79. int i;
  80. for (i = 0; i < 8; i++) {
  81. int b;
  82. for (b = 7; b >= 0; b--) {
  83. r <<= 1;
  84. if ((1 << b) & sas_addr[i]) {
  85. if (!(r & 0x01000000))
  86. r ^= poly;
  87. } else if (r & 0x01000000)
  88. r ^= poly;
  89. }
  90. }
  91. hashed[0] = (r >> 16) & 0xFF;
  92. hashed[1] = (r >> 8) & 0xFF ;
  93. hashed[2] = r & 0xFF;
  94. }
/*
 * sas_register_ha - hook a SAS host adapter into the transport layer
 * @sas_ha: host adapter with dev, sas_addr and phy/port arrays filled in
 *
 * Initializes HA-wide locks and queues, hashes the SAS address,
 * registers phys and ports, starts event handling, and creates the
 * per-HA event and discovery workqueues.  Returns 0 on success or a
 * negative errno; on failure, steps already completed are rolled back
 * in reverse order via the Undo_* labels.
 */
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
	char name[64];
	int error = 0;

	mutex_init(&sas_ha->disco_mutex);
	spin_lock_init(&sas_ha->phy_port_lock);
	sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);

	set_bit(SAS_HA_REGISTERED, &sas_ha->state);
	spin_lock_init(&sas_ha->lock);
	mutex_init(&sas_ha->drain_mutex);
	init_waitqueue_head(&sas_ha->eh_wait_q);
	INIT_LIST_HEAD(&sas_ha->defer_q);
	INIT_LIST_HEAD(&sas_ha->eh_dev_q);

	/* default burst threshold before a phy is shut down */
	sas_ha->event_thres = SAS_PHY_SHUTDOWN_THRES;

	error = sas_register_phys(sas_ha);
	if (error) {
		printk(KERN_NOTICE "couldn't register sas phys:%d\n", error);
		return error;
	}

	error = sas_register_ports(sas_ha);
	if (error) {
		printk(KERN_NOTICE "couldn't register sas ports:%d\n", error);
		goto Undo_phys;
	}

	error = sas_init_events(sas_ha);
	if (error) {
		printk(KERN_NOTICE "couldn't start event thread:%d\n", error);
		goto Undo_ports;
	}

	/* workqueue allocation failures below report -ENOMEM */
	error = -ENOMEM;
	snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
	sas_ha->event_q = create_singlethread_workqueue(name);
	if (!sas_ha->event_q)
		goto Undo_ports;

	snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev));
	sas_ha->disco_q = create_singlethread_workqueue(name);
	if (!sas_ha->disco_q)
		goto Undo_event_q;

	INIT_LIST_HEAD(&sas_ha->eh_done_q);
	INIT_LIST_HEAD(&sas_ha->eh_ata_q);

	return 0;

Undo_event_q:
	destroy_workqueue(sas_ha->event_q);
Undo_ports:
	sas_unregister_ports(sas_ha);
Undo_phys:
	return error;
}
/*
 * sas_disable_events - stop new event processing on this HA
 *
 * Clears SAS_HA_REGISTERED under ha->lock so no further unchained
 * events are queued, then drains in-flight work.  drain_mutex is held
 * across both steps so concurrent drainers cannot race the state
 * change.
 */
static void sas_disable_events(struct sas_ha_struct *sas_ha)
{
	/* Set the state to unregistered to avoid further unchained
	 * events to be queued, and flush any in-progress drainers
	 */
	mutex_lock(&sas_ha->drain_mutex);
	spin_lock_irq(&sas_ha->lock);
	clear_bit(SAS_HA_REGISTERED, &sas_ha->state);
	spin_unlock_irq(&sas_ha->lock);

	__sas_drain_work(sas_ha);
	mutex_unlock(&sas_ha->drain_mutex);
}
/*
 * sas_unregister_ha - tear down a previously registered HA
 *
 * Disables event processing, unregisters the ports (which queues
 * device unregistration work), flushes that work, then destroys the
 * per-HA workqueues created in sas_register_ha().  Always returns 0.
 */
int sas_unregister_ha(struct sas_ha_struct *sas_ha)
{
	sas_disable_events(sas_ha);
	sas_unregister_ports(sas_ha);

	/* flush unregistration work */
	mutex_lock(&sas_ha->drain_mutex);
	__sas_drain_work(sas_ha);
	mutex_unlock(&sas_ha->drain_mutex);

	destroy_workqueue(sas_ha->disco_q);
	destroy_workqueue(sas_ha->event_q);

	return 0;
}
  167. static int sas_get_linkerrors(struct sas_phy *phy)
  168. {
  169. if (scsi_is_sas_phy_local(phy)) {
  170. struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
  171. struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
  172. struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
  173. struct sas_internal *i =
  174. to_sas_internal(sas_ha->core.shost->transportt);
  175. return i->dft->lldd_control_phy(asd_phy, PHY_FUNC_GET_EVENTS, NULL);
  176. }
  177. return sas_smp_get_phy_events(phy);
  178. }
  179. int sas_try_ata_reset(struct asd_sas_phy *asd_phy)
  180. {
  181. struct domain_device *dev = NULL;
  182. /* try to route user requested link resets through libata */
  183. if (asd_phy->port)
  184. dev = asd_phy->port->port_dev;
  185. /* validate that dev has been probed */
  186. if (dev)
  187. dev = sas_find_dev_by_rphy(dev->rphy);
  188. if (dev && dev_is_sata(dev)) {
  189. sas_ata_schedule_reset(dev);
  190. sas_ata_wait_eh(dev);
  191. return 0;
  192. }
  193. return -ENODEV;
  194. }
/*
 * transport_sas_phy_reset - reset a phy and permit libata to manage the link
 *
 * phy reset request via sysfs in host workqueue context so we know we
 * can block on eh and safely traverse the domain_device topology
 */
static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
	enum phy_func reset_type;

	if (hard_reset)
		reset_type = PHY_FUNC_HARD_RESET;
	else
		reset_type = PHY_FUNC_LINK_RESET;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		/* Soft resets of a SATA link are handed to libata EH
		 * first; fall through to the LLDD phy control if that
		 * fails or a hard reset was requested.
		 */
		if (!hard_reset && sas_try_ata_reset(asd_phy) == 0)
			return 0;
		return i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
		struct domain_device *ata_dev = sas_ex_to_ata(ddev, phy->number);

		/* Expander phy with an ATA child: let libata perform
		 * soft resets; hard resets go out as SMP PHY CONTROL.
		 */
		if (ata_dev && !hard_reset) {
			sas_ata_schedule_reset(ata_dev);
			sas_ata_wait_eh(ata_dev);
			return 0;
		} else
			return sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
	}
}
  229. static int sas_phy_enable(struct sas_phy *phy, int enable)
  230. {
  231. int ret;
  232. enum phy_func cmd;
  233. if (enable)
  234. cmd = PHY_FUNC_LINK_RESET;
  235. else
  236. cmd = PHY_FUNC_DISABLE;
  237. if (scsi_is_sas_phy_local(phy)) {
  238. struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
  239. struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
  240. struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
  241. struct sas_internal *i =
  242. to_sas_internal(sas_ha->core.shost->transportt);
  243. if (enable)
  244. ret = transport_sas_phy_reset(phy, 0);
  245. else
  246. ret = i->dft->lldd_control_phy(asd_phy, cmd, NULL);
  247. } else {
  248. struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
  249. struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
  250. if (enable)
  251. ret = transport_sas_phy_reset(phy, 0);
  252. else
  253. ret = sas_smp_phy_control(ddev, phy->number, cmd, NULL);
  254. }
  255. return ret;
  256. }
  257. int sas_phy_reset(struct sas_phy *phy, int hard_reset)
  258. {
  259. int ret;
  260. enum phy_func reset_type;
  261. if (!phy->enabled)
  262. return -ENODEV;
  263. if (hard_reset)
  264. reset_type = PHY_FUNC_HARD_RESET;
  265. else
  266. reset_type = PHY_FUNC_LINK_RESET;
  267. if (scsi_is_sas_phy_local(phy)) {
  268. struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
  269. struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
  270. struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
  271. struct sas_internal *i =
  272. to_sas_internal(sas_ha->core.shost->transportt);
  273. ret = i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
  274. } else {
  275. struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
  276. struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
  277. ret = sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
  278. }
  279. return ret;
  280. }
/*
 * sas_set_phy_speed - change a phy's minimum/maximum negotiated link rate
 * @rates: requested bounds; a zero field means "leave unchanged"
 *
 * Rejects requests whose new minimum exceeds the current maximum (or
 * vice versa), clamps the request to the hardware limits, then
 * programs the phy via the LLDD for local phys or an SMP PHY CONTROL
 * for expander-attached phys.
 */
int sas_set_phy_speed(struct sas_phy *phy,
		struct sas_phy_linkrates *rates)
{
	int ret;

	/* only validate fields the caller actually set (nonzero) */
	if ((rates->minimum_linkrate &&
	     rates->minimum_linkrate > phy->maximum_linkrate) ||
	    (rates->maximum_linkrate &&
	     rates->maximum_linkrate < phy->minimum_linkrate))
		return -EINVAL;

	if (rates->minimum_linkrate &&
	    rates->minimum_linkrate < phy->minimum_linkrate_hw)
		rates->minimum_linkrate = phy->minimum_linkrate_hw;

	if (rates->maximum_linkrate &&
	    rates->maximum_linkrate > phy->maximum_linkrate_hw)
		rates->maximum_linkrate = phy->maximum_linkrate_hw;

	if (scsi_is_sas_phy_local(phy)) {
		struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
		struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
		struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
		struct sas_internal *i =
			to_sas_internal(sas_ha->core.shost->transportt);

		ret = i->dft->lldd_control_phy(asd_phy, PHY_FUNC_SET_LINK_RATE,
					       rates);
	} else {
		struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
		struct domain_device *ddev = sas_find_dev_by_rphy(rphy);

		/* the SMP request carries the new rates; a LINK RESET
		 * makes them take effect on the expander phy
		 */
		ret = sas_smp_phy_control(ddev, phy->number,
					  PHY_FUNC_LINK_RESET, rates);
	}

	return ret;
}
/*
 * sas_prep_resume_ha - ready a suspended HA for resume
 *
 * Re-arms the REGISTERED state bit and wipes the per-phy attached
 * address and received-frame bookkeeping left over from the suspend
 * path, so resume-time link events start from a clean slate.
 */
void sas_prep_resume_ha(struct sas_ha_struct *ha)
{
	int i;

	set_bit(SAS_HA_REGISTERED, &ha->state);

	/* clear out any stale link events/data from the suspension path */
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_phy *phy = ha->sas_phy[i];

		memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
		phy->frame_rcvd_size = 0;
	}
}
EXPORT_SYMBOL(sas_prep_resume_ha);
  324. static int phys_suspended(struct sas_ha_struct *ha)
  325. {
  326. int i, rc = 0;
  327. for (i = 0; i < ha->num_phys; i++) {
  328. struct asd_sas_phy *phy = ha->sas_phy[i];
  329. if (phy->suspended)
  330. rc++;
  331. }
  332. return rc;
  333. }
/*
 * sas_resume_ha - bring a suspended HA back into operation
 *
 * Waits up to 25 seconds for the LLDD to resume all phys.  Any phy
 * still suspended after the timeout gets a PHYE_RESUME_TIMEOUT event
 * so its port can be torn down from libsas context.  Finally unblocks
 * I/O and drains queued work to flush out devices that never returned.
 */
void sas_resume_ha(struct sas_ha_struct *ha)
{
	const unsigned long tmo = msecs_to_jiffies(25000);
	int i;

	/* deform ports on phys that did not resume
	 * at this point we may be racing the phy coming back (as posted
	 * by the lldd). So we post the event and once we are in the
	 * libsas context check that the phy remains suspended before
	 * tearing it down.
	 */
	i = phys_suspended(ha);
	if (i)
		dev_info(ha->dev, "waiting up to 25 seconds for %d phy%s to resume\n",
			 i, i > 1 ? "s" : "");
	wait_event_timeout(ha->eh_wait_q, phys_suspended(ha) == 0, tmo);

	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_phy *phy = ha->sas_phy[i];

		if (phy->suspended) {
			dev_warn(&phy->phy->dev, "resume timeout\n");
			sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT);
		}
	}

	/* all phys are back up or timed out, turn on i/o so we can
	 * flush out disks that did not return
	 */
	scsi_unblock_requests(ha->core.shost);
	sas_drain_work(ha);
}
EXPORT_SYMBOL(sas_resume_ha);
/*
 * sas_suspend_ha - quiesce an HA ahead of system suspend
 *
 * Stops event processing, blocks new I/O, posts a DISCE_SUSPEND
 * discovery event to every port, and drains that work under
 * drain_mutex before returning.
 */
void sas_suspend_ha(struct sas_ha_struct *ha)
{
	int i;

	sas_disable_events(ha);
	scsi_block_requests(ha->core.shost);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];

		sas_discover_event(port, DISCE_SUSPEND);
	}

	/* flush suspend events while unregistered */
	mutex_lock(&ha->drain_mutex);
	__sas_drain_work(ha);
	mutex_unlock(&ha->drain_mutex);
}
EXPORT_SYMBOL(sas_suspend_ha);
  378. static void sas_phy_release(struct sas_phy *phy)
  379. {
  380. kfree(phy->hostdata);
  381. phy->hostdata = NULL;
  382. }
  383. static void phy_reset_work(struct work_struct *work)
  384. {
  385. struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work);
  386. d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset);
  387. }
  388. static void phy_enable_work(struct work_struct *work)
  389. {
  390. struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work);
  391. d->enable_result = sas_phy_enable(d->phy, d->enable);
  392. }
  393. static int sas_phy_setup(struct sas_phy *phy)
  394. {
  395. struct sas_phy_data *d = kzalloc(sizeof(*d), GFP_KERNEL);
  396. if (!d)
  397. return -ENOMEM;
  398. mutex_init(&d->event_lock);
  399. INIT_SAS_WORK(&d->reset_work, phy_reset_work);
  400. INIT_SAS_WORK(&d->enable_work, phy_enable_work);
  401. d->phy = phy;
  402. phy->hostdata = d;
  403. return 0;
  404. }
/*
 * queue_phy_reset - run a phy reset through the libsas workqueue
 * @hard_reset: nonzero for hard-reset semantics
 *
 * Executes the reset in host workqueue context (phy_reset_work) so it
 * can coordinate with ata-eh and discovery, then drains the queue and
 * reports the recorded result.  d->event_lock serializes concurrent
 * sysfs requests against the same phy.
 */
static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
{
	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_phy_data *d = phy->hostdata;
	int rc;

	/* no per-phy state means sas_phy_setup() never ran/failed */
	if (!d)
		return -ENOMEM;

	/* libsas workqueue coordinates ata-eh reset with discovery */
	mutex_lock(&d->event_lock);
	d->reset_result = 0;
	d->hard_reset = hard_reset;

	spin_lock_irq(&ha->lock);
	sas_queue_work(ha, &d->reset_work);
	spin_unlock_irq(&ha->lock);

	/* drain failure (e.g. HA unregistered) takes precedence */
	rc = sas_drain_work(ha);
	if (rc == 0)
		rc = d->reset_result;
	mutex_unlock(&d->event_lock);

	return rc;
}
/*
 * queue_phy_enable - run a phy enable/disable through the libsas workqueue
 * @enable: nonzero to enable, zero to disable
 *
 * Mirror of queue_phy_reset(): defers to phy_enable_work in host
 * workqueue context, drains, and returns the recorded result.
 */
static int queue_phy_enable(struct sas_phy *phy, int enable)
{
	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
	struct sas_phy_data *d = phy->hostdata;
	int rc;

	/* no per-phy state means sas_phy_setup() never ran/failed */
	if (!d)
		return -ENOMEM;

	/* libsas workqueue coordinates ata-eh reset with discovery */
	mutex_lock(&d->event_lock);
	d->enable_result = 0;
	d->enable = enable;

	spin_lock_irq(&ha->lock);
	sas_queue_work(ha, &d->enable_work);
	spin_unlock_irq(&ha->lock);

	/* drain failure (e.g. HA unregistered) takes precedence */
	rc = sas_drain_work(ha);
	if (rc == 0)
		rc = d->enable_result;
	mutex_unlock(&d->event_lock);

	return rc;
}
/* Callbacks libsas supplies to the scsi_transport_sas class. */
static struct sas_function_template sft = {
	.phy_enable = queue_phy_enable,
	.phy_reset = queue_phy_reset,
	.phy_setup = sas_phy_setup,
	.phy_release = sas_phy_release,
	.set_phy_speed = sas_set_phy_speed,
	.get_linkerrors = sas_get_linkerrors,
	.smp_handler = sas_smp_handler,
};
  456. static inline ssize_t phy_event_threshold_show(struct device *dev,
  457. struct device_attribute *attr, char *buf)
  458. {
  459. struct Scsi_Host *shost = class_to_shost(dev);
  460. struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
  461. return scnprintf(buf, PAGE_SIZE, "%u\n", sha->event_thres);
  462. }
  463. static inline ssize_t phy_event_threshold_store(struct device *dev,
  464. struct device_attribute *attr,
  465. const char *buf, size_t count)
  466. {
  467. struct Scsi_Host *shost = class_to_shost(dev);
  468. struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
  469. sha->event_thres = simple_strtol(buf, NULL, 10);
  470. /* threshold cannot be set too small */
  471. if (sha->event_thres < 32)
  472. sha->event_thres = 32;
  473. return count;
  474. }
/* phy_event_threshold: world-readable, root-writable sysfs knob,
 * exported so LLDD host templates can include it in their attrs.
 */
DEVICE_ATTR(phy_event_threshold,
	S_IRUGO|S_IWUSR,
	phy_event_threshold_show,
	phy_event_threshold_store);
EXPORT_SYMBOL_GPL(dev_attr_phy_event_threshold);
  480. struct scsi_transport_template *
  481. sas_domain_attach_transport(struct sas_domain_function_template *dft)
  482. {
  483. struct scsi_transport_template *stt = sas_attach_transport(&sft);
  484. struct sas_internal *i;
  485. if (!stt)
  486. return stt;
  487. i = to_sas_internal(stt);
  488. i->dft = dft;
  489. stt->create_work_queue = 1;
  490. stt->eh_strategy_handler = sas_scsi_recover_host;
  491. return stt;
  492. }
  493. EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
/*
 * sas_alloc_event - allocate a phy event, enforcing the burst threshold
 *
 * Tracks per-phy outstanding events in phy->event_nr.  Once the count
 * exceeds ha->event_thres the phy is treated as bursting: if the LLDD
 * supports phy control the phy is shut down once (guarded by the
 * in_shutdown cmpxchg); otherwise the allocation is dropped and NULL
 * returned.  GFP_ATOMIC is used when called from interrupt context.
 */
struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
{
	struct asd_sas_event *event;
	gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
	struct sas_ha_struct *sas_ha = phy->ha;
	struct sas_internal *i =
		to_sas_internal(sas_ha->core.shost->transportt);

	event = kmem_cache_zalloc(sas_event_cache, flags);
	if (!event)
		return NULL;

	atomic_inc(&phy->event_nr);

	if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
		if (i->dft->lldd_control_phy) {
			/* only the first over-threshold caller posts
			 * the shutdown event
			 */
			if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
				sas_printk("The phy%02d bursting events, shut it down.\n",
					   phy->id);
				sas_notify_phy_event(phy, PHYE_SHUTDOWN);
			}
		} else {
			/* Do not support PHY control, stop allocating events */
			WARN_ONCE(1, "PHY control not supported.\n");
			kmem_cache_free(sas_event_cache, event);
			atomic_dec(&phy->event_nr);
			event = NULL;
		}
	}

	return event;
}
/* Return an event to its cache and drop the phy's outstanding-event
 * count incremented by sas_alloc_event().
 */
void sas_free_event(struct asd_sas_event *event)
{
	struct asd_sas_phy *phy = event->phy;

	kmem_cache_free(sas_event_cache, event);
	atomic_dec(&phy->event_nr);
}
  528. /* ---------- SAS Class register/unregister ---------- */
  529. static int __init sas_class_init(void)
  530. {
  531. sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
  532. if (!sas_task_cache)
  533. goto out;
  534. sas_event_cache = KMEM_CACHE(asd_sas_event, SLAB_HWCACHE_ALIGN);
  535. if (!sas_event_cache)
  536. goto free_task_kmem;
  537. return 0;
  538. free_task_kmem:
  539. kmem_cache_destroy(sas_task_cache);
  540. out:
  541. return -ENOMEM;
  542. }
/* Module exit: tear down both slab caches created in sas_class_init(). */
static void __exit sas_class_exit(void)
{
	kmem_cache_destroy(sas_task_cache);
	kmem_cache_destroy(sas_event_cache);
}
MODULE_AUTHOR("Luben Tuikov <luben_tuikov@adaptec.com>");
MODULE_DESCRIPTION("SAS Transport Layer");
MODULE_LICENSE("GPL v2");

module_init(sas_class_init);
module_exit(sas_class_exit);

/* HA lifecycle entry points used by low-level drivers. */
EXPORT_SYMBOL_GPL(sas_register_ha);
EXPORT_SYMBOL_GPL(sas_unregister_ha);