fnic_fcs.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);

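/**
 * fnic_handle_link() - handle link events from the firmware.
 * @work: the fnic link_work work element.
 *
 * Reads the current link state, link-down count and port speed from the
 * vNIC device, publishes the speed through the FC host attributes, and
 * drives the FCoE controller (fcoe_ctlr_link_up()/fcoe_ctlr_link_down())
 * according to the observed DOWN/UP transition. On FIP-capable adapters
 * a link-up transition first restarts FCoE VLAN discovery.
 */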
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;
	u64 old_port_speed, new_port_speed;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	fnic->link_events = 1;	/* less work to just set every time */

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	old_port_speed = atomic64_read(
			&fnic->fnic_stats.misc_stats.current_port_speed);

	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	new_port_speed = vnic_dev_port_speed(fnic->vdev);
	atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
			new_port_speed);
	if (old_port_speed != new_port_speed)
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
				"Current vnic speed set to: %llu\n",
				new_port_speed);

	switch (vnic_dev_port_speed(fnic->vdev)) {
	case DCEM_PORTSPEED_10G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
		break;
	case DCEM_PORTSPEED_20G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
		break;
	case DCEM_PORTSPEED_25G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
		break;
	case DCEM_PORTSPEED_40G:
	case DCEM_PORTSPEED_4x10G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
		break;
	case DCEM_PORTSPEED_100G:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
		break;
	default:
		fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
		break;
	}

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status) {
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fc_trace_set_data(fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN->DOWN",
				strlen("Link Status: DOWN->DOWN"));
			FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
					"down->down\n");
		} else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no,
					FNIC_FC_LE,
					"Link Status: UP_DOWN_UP",
					strlen("Link Status: UP_DOWN_UP"));
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
						"link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
					/* start FCoE VLAN discovery */
					fnic_fc_trace_set_data(
						fnic->lport->host->host_no,
						FNIC_FC_LE,
						"Link Status: UP_DOWN_UP_VLAN",
						strlen("Link Status: UP_DOWN_UP_VLAN"));
					fnic_fcoe_send_vlan_req(fnic);
					return;
				}
				FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
						"up->down->up: Link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else {
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no, FNIC_FC_LE,
					"Link Status: UP_UP",
					strlen("Link Status: UP_UP"));
				FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
						"up->up\n");
			}
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			/* start FCoE VLAN discovery */
			fnic_fc_trace_set_data(fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
				strlen("Link Status: DOWN_UP_VLAN"));
			fnic_fcoe_send_vlan_req(fnic);
			return;
		}
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
				"down->up: Link up\n");
		fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
				"up->down: Link down\n");
		fnic_fc_trace_set_data(
			fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: UP_DOWN",
			strlen("Link Status: UP_DOWN"));
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
					"deleting fip-timer during link-down\n");
			del_timer_sync(&fnic->fip_timer);
		}
		fcoe_ctlr_link_down(&fnic->ctlr);
	}
}

/*
 * This function passes incoming fabric frames to libFC.
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		fp = (struct fc_frame *)skb;

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fc_exch_recv(lp, fp);
	}
}

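/**
 * fnic_fcoe_evlist_free() - free all pending events on the fnic event list.
 * @fnic: fnic instance.
 */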
void fnic_fcoe_evlist_free(struct fnic *fnic)
{
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		list_del(&fevt->list);
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

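/**
 * fnic_handle_event() - service queued fnic events.
 * @work: the fnic event_work work element.
 *
 * Dequeues events posted by fnic_event_enq() and dispatches them:
 * FNIC_EVT_START_VLAN_DISC sends a FIP VLAN discovery request and
 * FNIC_EVT_START_FCF_DISC starts FCF discovery. Events are left queued
 * while the fnic is in a transitional state.
 */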
void fnic_handle_event(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, event_work);
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		if (fnic->stop_rx_link_events) {
			list_del(&fevt->list);
			kfree(fevt);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}

		list_del(&fevt->list);
		switch (fevt->event) {
		case FNIC_EVT_START_VLAN_DISC:
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fcoe_send_vlan_req(fnic);
			spin_lock_irqsave(&fnic->fnic_lock, flags);
			break;
		case FNIC_EVT_START_FCF_DISC:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
					"Start FCF Discovery\n");
			fnic_fcoe_start_fcf_disc(fnic);
			break;
		default:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
					"Unknown event 0x%x\n", fevt->event);
			break;
		}
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/**
 * is_fnic_fip_flogi_reject() - check if a received FIP FLOGI frame is a reject.
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Returns non-zero if the frame is a FLOGI reply rejected by the switch
 * (an encapsulated ELS LS_RJT), zero otherwise.
 */
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
					   struct sk_buff *skb)
{
	struct fc_lport *lport = fip->lp;
	struct fip_header *fiph;
	struct fc_frame_header *fh = NULL;
	struct fip_desc *desc;
	struct fip_encaps *els;
	u16 op;
	u8 els_op;
	u8 sub;

	size_t rlen;
	size_t dlen = 0;

	if (skb_linearize(skb))
		return 0;

	if (skb->len < sizeof(*fiph))
		return 0;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (op != FIP_OP_LS)
		return 0;

	if (sub != FIP_SC_REP)
		return 0;

	rlen = ntohs(fiph->fip_dl_len) * 4;
	if (rlen + sizeof(*fiph) > skb->len)
		return 0;

	desc = (struct fip_desc *)(fiph + 1);
	dlen = desc->fip_dlen * FIP_BPW;

	if (desc->fip_dtype == FIP_DT_FLOGI) {

		if (dlen < sizeof(*els) + sizeof(*fh) + 1)
			return 0;

		els = (struct fip_encaps *)desc;
		fh = (struct fc_frame_header *)(els + 1);

		if (!fh)
			return 0;

		/*
		 * ELS command code, reason and explanation should be = Reject,
		 * unsupported command and insufficient resource
		 */
		els_op = *(u8 *)(fh + 1);
		if (els_op == ELS_LS_RJT) {
			shost_printk(KERN_INFO, lport->host,
					"Flogi Request Rejected by Switch\n");
			return 1;
		}
		shost_printk(KERN_INFO, lport->host,
				"Flogi Request Accepted by Switch\n");
	}
	return 0;
}

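/**
 * fnic_fcoe_send_vlan_req() - send a FIP VLAN discovery request.
 * @fnic: fnic instance.
 *
 * Clears any previously discovered VLANs, builds a FIP VLAN request
 * carrying MAC and WWNN descriptors addressed to ALL-FCF-MACs, hands it
 * to the FCoE controller's send routine, and arms the FIP timer so the
 * request can be retried if no response arrives.
 */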
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct sk_buff *skb;
	char *eth_fr;
	struct fip_vlan *vlan;
	u64 vlan_tov;

	fnic_fcoe_reset_vlans(fnic);
	fnic->set_vlan(fnic, 0);

	if (printk_ratelimit())
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
				"Sending VLAN request...\n");

	skb = dev_alloc_skb(sizeof(struct fip_vlan));
	if (!skb)
		return;

	eth_fr = (char *)skb->data;
	vlan = (struct fip_vlan *)eth_fr;

	memset(vlan, 0, sizeof(*vlan));
	memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
	memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
	vlan->eth.h_proto = htons(ETH_P_FIP);

	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
	vlan->fip.fip_op = htons(FIP_OP_VLAN);
	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
	memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
	atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);

	skb_put(skb, sizeof(*vlan));
	skb->protocol = htons(ETH_P_FIP);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	fip->send(fip, skb);

	/* set a timer so that we can retry if there is no response */
	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
	mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}

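/**
 * fnic_fcoe_process_vlan_resp() - process a FIP VLAN notification.
 * @fnic: fnic instance.
 * @skb: received FIP frame, with the Ethernet header already pulled.
 *
 * Walks the VLAN descriptors in the response and adds each advertised
 * VLAN to the fnic's candidate list. The first candidate is then
 * programmed and FCF solicitation is started; if the response carries
 * no VLAN descriptors, discovery is retried from the FIP timer.
 */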
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fip_header *fiph;
	struct fip_desc *desc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u16 vid;
	size_t rlen;
	size_t dlen;
	struct fcoe_vlan *vlan;
	u64 sol_time;
	unsigned long flags;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
			"Received VLAN response...\n");

	fiph = (struct fip_header *)skb->data;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
			"Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
			ntohs(fiph->fip_op), fiph->fip_subcode);

	rlen = ntohs(fiph->fip_dl_len) * 4;
	fnic_fcoe_reset_vlans(fnic);
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	desc = (struct fip_desc *)(fiph + 1);
	while (rlen > 0) {
		dlen = desc->fip_dlen * FIP_BPW;
		switch (desc->fip_dtype) {
		case FIP_DT_VLAN:
			vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
			shost_printk(KERN_INFO, fnic->lport->host,
					"process_vlan_resp: FIP VLAN %d\n", vid);
			vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
			if (!vlan) {
				/* retry from timer */
				spin_unlock_irqrestore(&fnic->vlans_lock,
							flags);
				goto out;
			}
			vlan->vid = vid & 0x0fff;
			vlan->state = FIP_VLAN_AVAIL;
			list_add_tail(&vlan->list, &fnic->vlans);
			break;
		}
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}

	/* any VLAN descriptors present? */
	if (list_empty(&fnic->vlans)) {
		/* retry from timer */
		atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
				"No VLAN descriptors in FIP VLAN response\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		goto out;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count++;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(fip);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
	return;
}

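/**
 * fnic_fcoe_start_fcf_disc() - start FCF discovery on the current VLAN.
 * @fnic: fnic instance.
 *
 * Programs the first discovered VLAN into the hardware, marks it as
 * solicited, and brings the FCoE controller link up to begin FCF
 * solicitation, arming the FIP timer for retries.
 */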
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	u64 sol_time;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count = 1;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(&fnic->ctlr);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}

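/**
 * fnic_fcoe_vlan_check() - validate that a usable VLAN has been selected.
 * @fnic: fnic instance.
 * @flag: FIP flags from the received frame (currently unused).
 *
 * Returns 0 if the first VLAN on the list is already in use, or has an
 * outstanding solicitation (which is then promoted to FIP_VLAN_USED);
 * -EINVAL otherwise.
 */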
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
	unsigned long flags;
	struct fcoe_vlan *fvlan;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return -EINVAL;
	}

	fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	if (fvlan->state == FIP_VLAN_USED) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}

	if (fvlan->state == FIP_VLAN_SENT) {
		fvlan->state = FIP_VLAN_USED;
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
	return -EINVAL;
}

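/**
 * fnic_event_enq() - queue an event for fnic_handle_event().
 * @fnic: fnic instance.
 * @ev: event to queue.
 *
 * Allocates an event atomically, appends it to the fnic event list
 * under the fnic lock, and schedules the event work. Allocation
 * failures are silently ignored.
 */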
static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
	struct fnic_event *fevt;
	unsigned long flags;

	fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
	if (!fevt)
		return;

	fevt->fnic = fnic;
	fevt->event = ev;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_add_tail(&fevt->list, &fnic->evlist);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	schedule_work(&fnic->event_work);
}

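/**
 * fnic_fcoe_handle_fip_frame() - pre-process a received FIP frame.
 * @fnic: fnic instance.
 * @skb: received FIP frame, with the Ethernet header already pulled.
 *
 * Validates the FIP version and descriptor length, consumes VLAN
 * notifications locally, and restarts VLAN discovery on a clear virtual
 * link request. Returns a negative value for an unusable skb, 0 when
 * the frame was consumed here (a VLAN notification), and 1 when the
 * caller should pass the frame on to libfcoe.
 */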
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
	struct fip_header *fiph;
	int ret = 1;
	u16 op;
	u8 sub;

	if (!skb || !(skb->data))
		return -1;

	if (skb_linearize(skb))
		goto drop;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
		goto drop;

	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
		goto drop;

	if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
		if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
			goto drop;
		/* pass it on to fcoe */
		ret = 1;
	} else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
		/* set the vlan as used */
		fnic_fcoe_process_vlan_resp(fnic, skb);
		ret = 0;
	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
		/* received CVL request, restart vlan disc */
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		/* pass it on to fcoe */
		ret = 1;
	}
drop:
	return ret;
}

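/**
 * fnic_handle_fip_frame() - work handler for queued FIP frames.
 * @work: the fnic fip_frame_work work element.
 *
 * Drains the FIP frame queue, strips the Ethernet header, and lets
 * fnic_fcoe_handle_fip_frame() decide whether each frame is consumed
 * locally or handed to fcoe_ctlr_recv(). A FLOGI reject from the switch
 * forces a link-down and restarts VLAN discovery.
 */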
void fnic_handle_fip_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	struct sk_buff *skb;
	struct ethhdr *eh;

	while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->fip_frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		eh = (struct ethhdr *)skb->data;
		if (eh->h_proto == htons(ETH_P_FIP)) {
			skb_pull(skb, sizeof(*eh));
			if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
				dev_kfree_skb(skb);
				continue;
			}
			/*
			 * If there are FLOGI rejects - clear all
			 * FCFs and restart from scratch
			 */
			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
				atomic64_inc(
					&fnic_stats->vlan_stats.flogi_rejects);
				shost_printk(KERN_INFO, fnic->lport->host,
						"Trigger a Link down - VLAN Disc\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				/* start FCoE VLAN discovery */
				fnic_fcoe_send_vlan_req(fnic);
				dev_kfree_skb(skb);
				continue;
			}
			fcoe_ctlr_recv(&fnic->ctlr, skb);
			continue;
		}
	}
}

/**
 * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
 * @fnic: fnic instance.
 * @skb: Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;

	/*
	 * Undo VLAN encapsulation if present.
	 */
	eh = (struct ethhdr *)skb->data;
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}
	if (eh->h_proto == htons(ETH_P_FIP)) {
		if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
			printk(KERN_ERR "Dropped FIP frame, as firmware "
					"uses non-FIP mode, Enable FIP "
					"using UCSM\n");
			goto drop;
		}
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
		skb_queue_tail(&fnic->fip_frame_queue, skb);
		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
		return 1;		/* let caller know packet was used */
	}
	if (eh->h_proto != htons(ETH_P_FCOE))
		goto drop;
	skb_set_network_header(skb, sizeof(*eh));
	skb_pull(skb, sizeof(*eh));

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		goto drop;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	skb_reset_transport_header(skb);

	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;
	skb_trim(skb, skb->len - sizeof(*ft));
	return 0;
drop:
	dev_kfree_skb_irq(skb);
	return -1;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic: fnic instance.
 * @new: newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	u8 *ctl = fnic->ctlr.ctl_src_addr;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (ether_addr_equal(data, new))
		return;
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
			"update_mac %pM\n", new);
	if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);
	memcpy(data, new, ETH_ALEN);
	if (!ether_addr_equal(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}

/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport: local port.
 * @new: newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
	struct fnic *fnic = lport_priv(lport);

	spin_lock_irq(&fnic->fnic_lock);
	fnic_update_mac_locked(fnic, new);
	spin_unlock_irq(&fnic->fnic_lock);
}

/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport: local port.
 * @port_id: assigned FC_ID.
 * @fp: received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and setup the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, fnic->fnic_num,
			"set port_id 0x%x fp 0x%p\n",
			port_id, fp);

	/*
	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
	 * Set ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num,
				"Unexpected fnic state: %s processing FLOGI response",
				fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	if (ret < 0) {
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}

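/**
 * fnic_rq_cmpl_frame_recv() - per-buffer receive completion handler.
 * @rq: the receive queue the buffer came from.
 * @cq_desc: completion descriptor.
 * @buf: the receive buffer holding the frame.
 * @skipped: unused.
 * @opaque: unused.
 *
 * Decodes the completion (FCP or raw Ethernet), validates FCS/CRC and
 * encapsulation status, and queues good frames for fnic_handle_frame().
 * Frames with errors are counted and dropped.
 */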
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
					"fcs error. dropping packet.\n");
			goto drop;
		}
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
				"fnic rq_cmpl fcoe x%x fcsok x%x pkterr x%x "
				"fcoe_fc_crc_ok x%x, fcoe_enc_err x%x\n",
				fcoe, fcs_ok, packet_error,
				fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
				    (char *)skb->data, skb->len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}

static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}

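/**
 * fnic_rq_cmpl_handler() - service receive completion queues.
 * @fnic: fnic instance.
 * @rq_work_to_do: completion budget per queue.
 *
 * Services each RQ completion queue through fnic_rq_cmpl_frame_recv()
 * and replenishes the receive ring for any work done. Returns the
 * total number of completions processed.
 */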
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame can't alloc"
					     " frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	u16 len;
	dma_addr_t pa;
	int r;

	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
	skb = dev_alloc_skb(len);
	if (!skb) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
				"Unable to allocate RQ sk_buff\n");
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		r = -ENOMEM;
		printk(KERN_ERR "PCI mapping failed with error %d\n", r);
		goto free_skb;
	}

	fnic_queue_rq_desc(rq, skb, pa, len);
	return 0;

free_skb:
	kfree_skb(skb);
	return r;
}

void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip: fcoe_ctlr instance.
 * @skb: Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;

	if (!fnic->vlan_hw_insert) {
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	} else {
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	}

	pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
			    DMA_TO_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		printk(KERN_ERR "DMA mapping failed\n");
		goto free_skb;
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq))
		goto irq_restore;

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       0 /* hw inserts cos value */,
			       fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	return;

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
free_skb:
	kfree_skb(skb);
}

/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		ret = -ENOMEM;
		printk(KERN_ERR "DMA map failed with error %d\n", ret);
		goto free_skb_on_err;
	}

	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
				    (char *)eth_hdr, tot_len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
		ret = -1;
		goto irq_restore;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   0 /* hw inserts cos value */,
			   fnic->vlan_id, 1, 1, 1);

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

free_skb_on_err:
	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		return -1;
	}

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return fnic_send_frame(fnic, fp);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @work: pointer to work element
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, flush_work);
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->tx_queue))) {
		fp = (struct fc_frame *)skb;
		fnic_send_frame(fnic, fp);
	}
}

/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}

static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}

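/**
 * fnic_wq_cmpl_handler() - service raw work queue completions.
 * @fnic: fnic instance.
 * @work_to_do: completion budget per queue.
 *
 * Walks the completion queues that follow the RQ completion queues and
 * frees transmitted frames via fnic_wq_complete_frame_send(). Returns
 * the number of completions processed.
 */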
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
						work_to_do,
						fnic_wq_cmpl_handler_cont,
						NULL);
	}

	return wq_work_done;
}

void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fcoe_vlan *next;

	/*
	 * indicate a link down to fcoe so that all FCFs are freed;
	 * might not be required since we did this before sending vlan
	 * discovery request
	 */
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (!list_empty(&fnic->vlans)) {
		list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
			list_del(&vlan->list);
			kfree(vlan);
		}
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}

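/**
 * fnic_handle_fip_timer() - FIP solicitation/VLAN discovery timer handler.
 * @fnic: fnic instance.
 *
 * Runs when a solicitation or VLAN discovery attempt times out.
 * Depending on the state of the current VLAN it either re-solicits,
 * moves on to the next candidate VLAN, or restarts VLAN discovery when
 * all candidates are exhausted or failed.
 */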
void fnic_handle_fip_timer(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u64 sol_time;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
		return;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* no vlans available, try again */
		if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
			if (printk_ratelimit())
				shost_printk(KERN_DEBUG, fnic->lport->host,
						"Start VLAN Discovery\n");
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		return;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
			"fip_timer: vlan %d state %d sol_count %d\n",
			vlan->vid, vlan->state, vlan->sol_count);
	switch (vlan->state) {
	case FIP_VLAN_USED:
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
				"FIP VLAN is selected for FC transaction\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		break;
	case FIP_VLAN_FAILED:
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* if all vlans are in failed state, restart vlan disc */
		if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
			if (printk_ratelimit())
				shost_printk(KERN_DEBUG, fnic->lport->host,
						"Start VLAN Discovery\n");
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		break;
	case FIP_VLAN_SENT:
		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
			/*
			 * no response on this vlan, remove from the list.
			 * Try the next vlan
			 */
			FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
					"Dequeue this VLAN ID %d from list\n",
					vlan->vid);
			list_del(&vlan->list);
			kfree(vlan);
			vlan = NULL;
			if (list_empty(&fnic->vlans)) {
				/* we exhausted all vlans, restart vlan disc */
				spin_unlock_irqrestore(&fnic->vlans_lock,
							flags);
				FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num,
						"fip_timer: vlan list empty, "
						"trigger vlan disc\n");
				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
				return;
			}
			/* check the next vlan */
			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
						list);
			fnic->set_vlan(fnic, vlan->vid);
			vlan->state = FIP_VLAN_SENT; /* sent now */
		}
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
		vlan->sol_count++;
		sol_time = jiffies +
			msecs_to_jiffies(FCOE_CTLR_START_DELAY);
		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
		break;
	}
}