cmf.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Linux on zSeries Channel Measurement Facility support
  4. *
  5. * Copyright IBM Corp. 2000, 2006
  6. *
  7. * Authors: Arnd Bergmann <arndb@de.ibm.com>
  8. * Cornelia Huck <cornelia.huck@de.ibm.com>
  9. *
  10. * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
  11. */
  12. #define KMSG_COMPONENT "cio"
  13. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  14. #include <linux/bootmem.h>
  15. #include <linux/device.h>
  16. #include <linux/init.h>
  17. #include <linux/list.h>
  18. #include <linux/export.h>
  19. #include <linux/moduleparam.h>
  20. #include <linux/slab.h>
  21. #include <linux/timex.h> /* get_tod_clock() */
  22. #include <asm/ccwdev.h>
  23. #include <asm/cio.h>
  24. #include <asm/cmb.h>
  25. #include <asm/div64.h>
  26. #include "cio.h"
  27. #include "css.h"
  28. #include "device.h"
  29. #include "ioasm.h"
  30. #include "chsc.h"
/*
 * parameter to enable cmf during boot, possible uses are:
 *  "s390cmf"       -- enable cmf and allocate 2 MB of ram so measuring can be
 *                     used on any subchannel
 *  "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
 *                     <num> subchannel, where <num> is an integer
 *                     between 1 and 65535, default is 1024
 */
#define ARGSTRING "s390cmf"

/*
 * indices for READCMB
 *
 * avg_utilization is a pseudo index (-1): it is computed from several
 * hardware counters rather than read from a single measurement-block field.
 */
enum cmb_index {
	avg_utilization = -1,
	/* basic and extended format: */
	cmb_ssch_rsch_count = 0,
	cmb_sample_count,
	cmb_device_connect_time,
	cmb_function_pending_time,
	cmb_device_disconnect_time,
	cmb_control_unit_queuing_time,
	cmb_device_active_only_time,
	/* extended format only: */
	cmb_device_busy_time,
	cmb_initial_command_response_time,
};
/**
 * enum cmb_format - types of supported measurement block formats
 *
 * @CMF_BASIC:      traditional channel measurement blocks supported
 *                  by all machines that we run on
 * @CMF_EXTENDED:   improved format that was introduced with the z990
 *                  machine
 * @CMF_AUTODETECT: default: use extended format when running on a machine
 *                  supporting extended format, otherwise fall back to
 *                  basic format
 */
enum cmb_format {
	CMF_BASIC,
	CMF_EXTENDED,
	CMF_AUTODETECT = -1,
};

/*
 * format - actual format for all measurement blocks
 *
 * The format module parameter can be set to a value of 0 (zero)
 * or 1, indicating basic or extended format as described for
 * enum cmb_format.
 */
static int format = CMF_AUTODETECT;
module_param(format, bint, 0444);
/**
 * struct cmb_operations - functions to use depending on cmb_format
 *
 * Most of these functions operate on a struct ccw_device. There is only
 * one instance of struct cmb_operations because the format of the measurement
 * data is guaranteed to be the same for every ccw_device.
 *
 * @alloc:   allocate memory for a channel measurement block,
 *           either with the help of a special pool or with kmalloc
 * @free:    free memory allocated with @alloc
 * @set:     enable or disable measurement
 * @read:    read a measurement entry at an index
 * @readall: read a measurement block in a common format
 * @reset:   clear the data in the associated measurement block and
 *           reset its time stamp
 */
struct cmb_operations {
	int (*alloc) (struct ccw_device *);
	void (*free) (struct ccw_device *);
	int (*set) (struct ccw_device *, u32);
	u64 (*read) (struct ccw_device *, int);
	int (*readall)(struct ccw_device *, struct cmbdata *);
	void (*reset) (struct ccw_device *);
	/* private: */
	struct attribute_group *attr_group;
};

/* active operations; presumably selected once the format is known — set
 * outside the code visible here */
static struct cmb_operations *cmbops;

/* per-device measurement state, hung off ccw_device_private->cmb */
struct cmb_data {
	void *hw_block;   /* Pointer to block updated by hardware */
	void *last_block; /* Last changed block copied from hardware block */
	int size;         /* Size of hw_block and last_block */
	unsigned long long last_update;  /* when last_block was updated */
};
  113. /*
  114. * Our user interface is designed in terms of nanoseconds,
  115. * while the hardware measures total times in its own
  116. * unit.
  117. */
  118. static inline u64 time_to_nsec(u32 value)
  119. {
  120. return ((u64)value) * 128000ull;
  121. }
  122. /*
  123. * Users are usually interested in average times,
  124. * not accumulated time.
  125. * This also helps us with atomicity problems
  126. * when reading sinlge values.
  127. */
  128. static inline u64 time_to_avg_nsec(u32 value, u32 count)
  129. {
  130. u64 ret;
  131. /* no samples yet, avoid division by 0 */
  132. if (count == 0)
  133. return 0;
  134. /* value comes in units of 128 µsec */
  135. ret = time_to_nsec(value);
  136. do_div(ret, count);
  137. return ret;
  138. }
/* mode operands for the SCHM instruction below */
#define CMF_OFF 0
#define CMF_ON	2

/*
 * Activate or deactivate the channel monitor. When area is NULL,
 * the monitor is deactivated. The channel monitor needs to
 * be active in order to measure subchannels, which also need
 * to be enabled.
 */
static inline void cmf_activate(void *area, unsigned int onoff)
{
	/*
	 * SCHM takes its operands in fixed registers: the measurement
	 * block origin in GPR2 and the mode (CMF_ON/CMF_OFF) in GPR1.
	 */
	register void * __gpr2 asm("2");
	register long __gpr1 asm("1");

	__gpr2 = area;
	__gpr1 = onoff;
	/* activate channel measurement */
	asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
}
/*
 * Commit new measurement settings to the subchannel backing @cdev.
 * @mme:     measurement-mode enable bit
 * @mbfc:    measurement-block-format control (1 = extended, address based;
 *           0 = basic, index based)
 * @address: measurement block address (extended) or index (basic)
 * Returns 0 on success or a negative error code from cio_commit_config().
 */
static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
		     unsigned long address)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret;

	sch->config.mme = mme;
	sch->config.mbfc = mbfc;
	/* address can be either a block address or a block index */
	if (mbfc)
		sch->config.mba = address;
	else
		sch->config.mbi = address;

	ret = cio_commit_config(sch);
	if (!mme && ret == -ENODEV) {
		/*
		 * The task was to disable measurement block updates but
		 * the subchannel is already gone. Report success.
		 */
		ret = 0;
	}
	return ret;
}
/*
 * On-stack context handed to the retry machinery (via
 * cdev->private->cmb_wait) while set_schib_wait() sleeps.
 */
struct set_schib_struct {
	u32 mme;		/* requested measurement-mode enable */
	int mbfc;		/* requested format control */
	unsigned long address;	/* block address or index */
	wait_queue_head_t wait;	/* woken by retry_set_schib() */
	int ret;		/* CMF_PENDING until the retry completed */
};

#define CMF_PENDING 1
#define SET_SCHIB_TIMEOUT (10 * HZ)
/*
 * Like set_schib(), but if the subchannel is busy, arm the device state
 * machine to retry the operation and sleep until the retry finished,
 * a timeout expired or a signal arrived.
 */
static int set_schib_wait(struct ccw_device *cdev, u32 mme,
			  int mbfc, unsigned long address)
{
	struct set_schib_struct set_data;
	int ret = -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	if (!cdev->private->cmb)
		goto out;

	ret = set_schib(cdev, mme, mbfc, address);
	if (ret != -EBUSY)
		goto out;

	/* if the device is not online, don't even try again */
	if (cdev->private->state != DEV_STATE_ONLINE)
		goto out;

	/* hand our on-stack context to retry_set_schib() and wait */
	init_waitqueue_head(&set_data.wait);
	set_data.mme = mme;
	set_data.mbfc = mbfc;
	set_data.address = address;
	set_data.ret = CMF_PENDING;

	cdev->private->state = DEV_STATE_CMFCHANGE;
	cdev->private->cmb_wait = &set_data;
	spin_unlock_irq(cdev->ccwlock);

	ret = wait_event_interruptible_timeout(set_data.wait,
					       set_data.ret != CMF_PENDING,
					       SET_SCHIB_TIMEOUT);
	spin_lock_irq(cdev->ccwlock);
	if (ret <= 0) {
		/* timed out (0) or interrupted (<0) before the retry ran */
		if (set_data.ret == CMF_PENDING) {
			set_data.ret = (ret == 0) ? -ETIME : ret;
			if (cdev->private->state == DEV_STATE_CMFCHANGE)
				cdev->private->state = DEV_STATE_ONLINE;
		}
	}
	/* must be cleared under the lock before set_data goes out of scope */
	cdev->private->cmb_wait = NULL;
	ret = set_data.ret;
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}
/*
 * Retry a pending set_schib() request on behalf of set_schib_wait() and
 * wake the sleeper. Called from the device state machine; presumably runs
 * with the ccwlock held by the caller — confirm against the fsm code.
 */
void retry_set_schib(struct ccw_device *cdev)
{
	struct set_schib_struct *set_data = cdev->private->cmb_wait;

	if (!set_data)
		return;

	set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc,
				  set_data->address);
	wake_up(&set_data->wait);
}
/*
 * Snapshot the hardware measurement block into last_block and stamp
 * last_update. Returns -ENODEV if the subchannel is gone and -EBUSY
 * while an I/O start function is still in progress (the block could be
 * updated concurrently by the channel subsystem in that case).
 */
static int cmf_copy_block(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct cmb_data *cmb_data;
	void *hw_block;

	if (cio_update_schib(sch))
		return -ENODEV;

	if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
		/* Don't copy if a start function is in progress. */
		if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
		    (scsw_actl(&sch->schib.scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
		    (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
			return -EBUSY;
	}
	cmb_data = cdev->private->cmb;
	hw_block = cmb_data->hw_block;
	memcpy(cmb_data->last_block, hw_block, cmb_data->size);
	cmb_data->last_update = get_tod_clock();
	return 0;
}
/* on-stack wait context for cmf_cmb_copy_wait(), see set_schib_struct */
struct copy_block_struct {
	wait_queue_head_t wait;	/* woken by cmf_retry_copy_block() */
	int ret;		/* CMF_PENDING until the retry completed */
};
/*
 * Like cmf_copy_block(), but if the subchannel is busy, arm the state
 * machine to retry and sleep (interruptibly, without timeout) until the
 * retry finished or a signal arrived.
 */
static int cmf_cmb_copy_wait(struct ccw_device *cdev)
{
	struct copy_block_struct copy_block;
	int ret = -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	if (!cdev->private->cmb)
		goto out;

	ret = cmf_copy_block(cdev);
	if (ret != -EBUSY)
		goto out;

	/* if the device is not online, don't even try again */
	if (cdev->private->state != DEV_STATE_ONLINE)
		goto out;

	init_waitqueue_head(&copy_block.wait);
	copy_block.ret = CMF_PENDING;

	cdev->private->state = DEV_STATE_CMFUPDATE;
	cdev->private->cmb_wait = &copy_block;
	spin_unlock_irq(cdev->ccwlock);

	ret = wait_event_interruptible(copy_block.wait,
				       copy_block.ret != CMF_PENDING);
	spin_lock_irq(cdev->ccwlock);
	if (ret) {
		/* interrupted by a signal before the retry ran */
		if (copy_block.ret == CMF_PENDING) {
			copy_block.ret = -ERESTARTSYS;
			if (cdev->private->state == DEV_STATE_CMFUPDATE)
				cdev->private->state = DEV_STATE_ONLINE;
		}
	}
	/* must be cleared under the lock before copy_block goes out of scope */
	cdev->private->cmb_wait = NULL;
	ret = copy_block.ret;
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}
/*
 * Retry a pending measurement-block copy on behalf of cmf_cmb_copy_wait()
 * and wake the sleeper. Called from the device state machine.
 */
void cmf_retry_copy_block(struct ccw_device *cdev)
{
	struct copy_block_struct *copy_block = cdev->private->cmb_wait;

	if (!copy_block)
		return;

	copy_block->ret = cmf_copy_block(cdev);
	wake_up(&copy_block->wait);
}
/*
 * Clear both copies of the measurement block and restart the measurement
 * interval. Shared by the basic and extended reset operations.
 */
static void cmf_generic_reset(struct ccw_device *cdev)
{
	struct cmb_data *cmb_data;

	spin_lock_irq(cdev->ccwlock);
	cmb_data = cdev->private->cmb;
	if (cmb_data) {
		memset(cmb_data->last_block, 0, cmb_data->size);
		/*
		 * Need to reset hw block as well to make the hardware start
		 * from 0 again.
		 */
		memset(cmb_data->hw_block, 0, cmb_data->size);
		cmb_data->last_update = 0;
	}
	/* new interval starts now, even when no cmb is allocated */
	cdev->private->cmb_start_time = get_tod_clock();
	spin_unlock_irq(cdev->ccwlock);
}
/**
 * struct cmb_area - container for global cmb data
 *
 * @mem:	  pointer to CMBs (only in basic measurement mode)
 * @list:	  contains a linked list of all subchannels
 * @num_channels: number of channels to be measured
 * @lock:	  protect concurrent access to @mem and @list
 */
struct cmb_area {
	struct cmb *mem;
	struct list_head list;
	int num_channels;
	spinlock_t lock;
};

/* the single global area; num_channels default may be overridden by the
 * "maxchannels" module parameter */
static struct cmb_area cmb_area = {
	.lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
	.list = LIST_HEAD_INIT(cmb_area.list),
	.num_channels  = 1024,
};
/* ****** old style CMB handling ********/

/*
 * Basic channel measurement blocks are allocated in one contiguous
 * block of memory, which can not be moved as long as any channel
 * is active. Therefore, a maximum number of subchannels needs to
 * be defined somewhere. This is a module parameter, defaulting to
 * a reasonable value of 1024, or 32 kb of memory.
 * Current kernels don't allow kmalloc with more than 128kb, so the
 * maximum is 4096.
 */
module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);

/**
 * struct cmb - basic channel measurement block
 * @ssch_rsch_count: number of ssch and rsch
 * @sample_count: number of samples
 * @device_connect_time: time of device connect
 * @function_pending_time: time of function pending
 * @device_disconnect_time: time of device disconnect
 * @control_unit_queuing_time: time of control unit queuing
 * @device_active_only_time: time of device active only
 * @reserved: unused in basic measurement mode
 *
 * The measurement block as used by the hardware. The fields are described
 * further in z/Architecture Principles of Operation, chapter 17.
 *
 * The cmb area made up from these blocks must be a contiguous array and may
 * not be reallocated or freed.
 * Only one cmb area can be present in the system.
 */
struct cmb {
	u16 ssch_rsch_count;
	u16 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 reserved[2];
};
/*
 * Insert a single device into the cmb_area list.
 * Called with cmb_area.lock held from alloc_cmb.
 */
static int alloc_cmb_single(struct ccw_device *cdev,
			    struct cmb_data *cmb_data)
{
	struct cmb *cmb;
	struct ccw_device_private *node;
	int ret;

	spin_lock_irq(cdev->ccwlock);
	if (!list_empty(&cdev->private->cmb_list)) {
		/* already on the list, i.e. measurement already enabled */
		ret = -EBUSY;
		goto out;
	}

	/*
	 * Find first unused cmb in cmb_area.mem.
	 * This is a little tricky: cmb_area.list
	 * remains sorted by ->cmb->hw_data pointers.
	 * Walking the sorted list, the first gap between consecutive
	 * hw_block pointers (or the slot past the last entry) is free.
	 */
	cmb = cmb_area.mem;
	list_for_each_entry(node, &cmb_area.list, cmb_list) {
		struct cmb_data *data;
		data = node->cmb;
		if ((struct cmb*)data->hw_block > cmb)
			break;
		cmb++;
	}
	if (cmb - cmb_area.mem >= cmb_area.num_channels) {
		/* the preallocated area is full */
		ret = -ENOMEM;
		goto out;
	}

	/* insert new cmb */
	list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
	cmb_data->hw_block = cmb;
	cdev->private->cmb = cmb_data;
	ret = 0;
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}
/*
 * Allocate a basic-format measurement block for @cdev. The first caller
 * also allocates and activates the global, contiguous cmb area.
 */
static int alloc_cmb(struct ccw_device *cdev)
{
	int ret;
	struct cmb *mem;
	ssize_t size;
	struct cmb_data *cmb_data;

	/* Allocate private cmb_data. */
	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
	if (!cmb_data)
		return -ENOMEM;

	cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
	if (!cmb_data->last_block) {
		kfree(cmb_data);
		return -ENOMEM;
	}
	cmb_data->size = sizeof(struct cmb);
	spin_lock(&cmb_area.lock);
	if (!cmb_area.mem) {
		/* there is no user yet, so we need a new area */
		size = sizeof(struct cmb) * cmb_area.num_channels;
		WARN_ON(!list_empty(&cmb_area.list));

		/*
		 * Drop the lock for the (possibly sleeping) page
		 * allocation; another thread may race us here, which is
		 * resolved after reacquiring the lock below.
		 */
		spin_unlock(&cmb_area.lock);
		mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
					      get_order(size));
		spin_lock(&cmb_area.lock);

		if (cmb_area.mem) {
			/* ok, another thread was faster */
			free_pages((unsigned long)mem, get_order(size));
		} else if (!mem) {
			/* no luck */
			ret = -ENOMEM;
			goto out;
		} else {
			/* everything ok */
			memset(mem, 0, size);
			cmb_area.mem = mem;
			cmf_activate(cmb_area.mem, CMF_ON);
		}
	}

	/* do the actual allocation */
	ret = alloc_cmb_single(cdev, cmb_data);
out:
	spin_unlock(&cmb_area.lock);
	if (ret) {
		kfree(cmb_data->last_block);
		kfree(cmb_data);
	}
	return ret;
}
/*
 * Release the basic-format measurement block of @cdev. The last user
 * also deactivates measurement and frees the global cmb area.
 */
static void free_cmb(struct ccw_device *cdev)
{
	struct ccw_device_private *priv;
	struct cmb_data *cmb_data;

	/* lock order: cmb_area.lock first, then the per-device ccwlock */
	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);

	priv = cdev->private;
	cmb_data = priv->cmb;
	priv->cmb = NULL;
	if (cmb_data)
		kfree(cmb_data->last_block);
	kfree(cmb_data);
	list_del_init(&priv->cmb_list);

	if (list_empty(&cmb_area.list)) {
		ssize_t size;
		size = sizeof(struct cmb) * cmb_area.num_channels;
		cmf_activate(NULL, CMF_OFF);
		free_pages((unsigned long)cmb_area.mem, get_order(size));
		cmb_area.mem = NULL;
	}
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
}
/*
 * Enable (mme != 0) or disable measurement for @cdev in basic mode.
 * Basic mode addresses the block by its index within the global area.
 */
static int set_cmb(struct ccw_device *cdev, u32 mme)
{
	u16 offset;
	struct cmb_data *cmb_data;
	unsigned long flags;

	spin_lock_irqsave(cdev->ccwlock, flags);
	if (!cdev->private->cmb) {
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		return -EINVAL;
	}
	cmb_data = cdev->private->cmb;
	/* index of this device's block within cmb_area.mem */
	offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
	spin_unlock_irqrestore(cdev->ccwlock, flags);

	return set_schib_wait(cdev, mme, 0, offset);
}
  504. /* calculate utilization in 0.1 percent units */
  505. static u64 __cmb_utilization(u64 device_connect_time, u64 function_pending_time,
  506. u64 device_disconnect_time, u64 start_time)
  507. {
  508. u64 utilization, elapsed_time;
  509. utilization = time_to_nsec(device_connect_time +
  510. function_pending_time +
  511. device_disconnect_time);
  512. elapsed_time = get_tod_clock() - start_time;
  513. elapsed_time = tod_to_ns(elapsed_time);
  514. elapsed_time /= 1000;
  515. return elapsed_time ? (utilization / elapsed_time) : 0;
  516. }
/*
 * Read a single value from the basic-format hardware block.
 * Counters are returned raw, time fields as per-sample averages in
 * nanoseconds; unknown indices yield 0.
 */
static u64 read_cmb(struct ccw_device *cdev, int index)
{
	struct cmb_data *cmb_data;
	unsigned long flags;
	struct cmb *cmb;
	u64 ret = 0;
	u32 val;

	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data)
		goto out;

	/* read directly from the live hardware block */
	cmb = cmb_data->hw_block;
	switch (index) {
	case avg_utilization:
		ret = __cmb_utilization(cmb->device_connect_time,
					cmb->function_pending_time,
					cmb->device_disconnect_time,
					cdev->private->cmb_start_time);
		goto out;
	case cmb_ssch_rsch_count:
		ret = cmb->ssch_rsch_count;
		goto out;
	case cmb_sample_count:
		ret = cmb->sample_count;
		goto out;
	case cmb_device_connect_time:
		val = cmb->device_connect_time;
		break;
	case cmb_function_pending_time:
		val = cmb->function_pending_time;
		break;
	case cmb_device_disconnect_time:
		val = cmb->device_disconnect_time;
		break;
	case cmb_control_unit_queuing_time:
		val = cmb->control_unit_queuing_time;
		break;
	case cmb_device_active_only_time:
		val = cmb->device_active_only_time;
		break;
	default:
		goto out;
	}
	ret = time_to_avg_nsec(val, cmb->sample_count);
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
/*
 * Fill @data with a snapshot of the basic-format measurement block,
 * converting all time fields to nanoseconds. Returns -EAGAIN if no
 * snapshot has been taken yet.
 */
static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
{
	struct cmb *cmb;
	struct cmb_data *cmb_data;
	u64 time;
	unsigned long flags;
	int ret;

	/* make sure last_block holds a current snapshot */
	ret = cmf_cmb_copy_wait(cdev);
	if (ret < 0)
		return ret;
	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data) {
		ret = -ENODEV;
		goto out;
	}
	if (cmb_data->last_update == 0) {
		ret = -EAGAIN;
		goto out;
	}
	cmb = cmb_data->last_block;
	time = cmb_data->last_update - cdev->private->cmb_start_time;
	memset(data, 0, sizeof(struct cmbdata));

	/* we only know values before device_busy_time */
	data->size = offsetof(struct cmbdata, device_busy_time);

	data->elapsed_time = tod_to_ns(time);

	/* copy data to new structure */
	data->ssch_rsch_count = cmb->ssch_rsch_count;
	data->sample_count = cmb->sample_count;

	/* time fields are converted to nanoseconds while copying */
	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
	data->device_disconnect_time =
		time_to_nsec(cmb->device_disconnect_time);
	data->control_unit_queuing_time
		= time_to_nsec(cmb->control_unit_queuing_time);
	data->device_active_only_time
		= time_to_nsec(cmb->device_active_only_time);
	ret = 0;
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
/* basic-format reset operation; the generic helper does all the work */
static void reset_cmb(struct ccw_device *cdev)
{
	cmf_generic_reset(cdev);
}
  612. static int cmf_enabled(struct ccw_device *cdev)
  613. {
  614. int enabled;
  615. spin_lock_irq(cdev->ccwlock);
  616. enabled = !!cdev->private->cmb;
  617. spin_unlock_irq(cdev->ccwlock);
  618. return enabled;
  619. }
static struct attribute_group cmf_attr_group;

/* cmb_operations for the basic (traditional) measurement block format */
static struct cmb_operations cmbops_basic = {
	.alloc	= alloc_cmb,
	.free	= free_cmb,
	.set	= set_cmb,
	.read	= read_cmb,
	.readall    = readall_cmb,
	.reset	    = reset_cmb,
	.attr_group = &cmf_attr_group,
};
/* ******** extended cmb handling ********/

/**
 * struct cmbe - extended channel measurement block
 * @ssch_rsch_count: number of ssch and rsch
 * @sample_count: number of samples
 * @device_connect_time: time of device connect
 * @function_pending_time: time of function pending
 * @device_disconnect_time: time of device disconnect
 * @control_unit_queuing_time: time of control unit queuing
 * @device_active_only_time: time of device active only
 * @device_busy_time: time of device busy
 * @initial_command_response_time: initial command response time
 * @reserved: unused
 *
 * The measurement block as used by the hardware. May be in any 64 bit physical
 * location.
 * The fields are described further in z/Architecture Principles of Operation,
 * third edition, chapter 17.
 */
struct cmbe {
	u32 ssch_rsch_count;
	u32 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 device_busy_time;
	u32 initial_command_response_time;
	u32 reserved[7];
} __packed __aligned(64);	/* hardware requires 64-byte alignment */

/* slab cache for 64-byte-aligned struct cmbe allocations */
static struct kmem_cache *cmbe_cache;
/*
 * Allocate an extended-format measurement block for @cdev. Unlike basic
 * mode, each device gets its own independently allocated block; the first
 * device also activates the channel monitor globally.
 */
static int alloc_cmbe(struct ccw_device *cdev)
{
	struct cmb_data *cmb_data;
	struct cmbe *cmbe;
	int ret = -ENOMEM;

	cmbe = kmem_cache_zalloc(cmbe_cache, GFP_KERNEL);
	if (!cmbe)
		return ret;

	cmb_data = kzalloc(sizeof(*cmb_data), GFP_KERNEL);
	if (!cmb_data)
		goto out_free;

	cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
	if (!cmb_data->last_block)
		goto out_free;

	cmb_data->size = sizeof(*cmbe);
	cmb_data->hw_block = cmbe;

	/* lock order: cmb_area.lock first, then the per-device ccwlock */
	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);
	if (cdev->private->cmb)
		goto out_unlock;

	cdev->private->cmb = cmb_data;

	/* activate global measurement if this is the first channel */
	if (list_empty(&cmb_area.list))
		cmf_activate(NULL, CMF_ON);
	list_add_tail(&cdev->private->cmb_list, &cmb_area.list);

	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
	return 0;

out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
	ret = -EBUSY;
out_free:
	/* cmb_data may be NULL when the first kzalloc failed */
	if (cmb_data)
		kfree(cmb_data->last_block);
	kfree(cmb_data);
	kmem_cache_free(cmbe_cache, cmbe);

	return ret;
}
/*
 * Release the extended-format measurement block of @cdev; the last user
 * also deactivates the channel monitor globally.
 */
static void free_cmbe(struct ccw_device *cdev)
{
	struct cmb_data *cmb_data;

	/* lock order: cmb_area.lock first, then the per-device ccwlock */
	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);
	cmb_data = cdev->private->cmb;
	cdev->private->cmb = NULL;
	if (cmb_data) {
		kfree(cmb_data->last_block);
		kmem_cache_free(cmbe_cache, cmb_data->hw_block);
	}
	kfree(cmb_data);

	/* deactivate global measurement if this is the last channel */
	list_del_init(&cdev->private->cmb_list);
	if (list_empty(&cmb_area.list))
		cmf_activate(NULL, CMF_OFF);
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
}
/*
 * Enable (mme != 0) or disable measurement for @cdev in extended mode.
 * Extended mode addresses the block directly (mbfc = 1) instead of by index.
 */
static int set_cmbe(struct ccw_device *cdev, u32 mme)
{
	unsigned long mba;
	struct cmb_data *cmb_data;
	unsigned long flags;

	spin_lock_irqsave(cdev->ccwlock, flags);
	if (!cdev->private->cmb) {
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		return -EINVAL;
	}
	cmb_data = cdev->private->cmb;
	mba = mme ? (unsigned long) cmb_data->hw_block : 0;
	spin_unlock_irqrestore(cdev->ccwlock, flags);

	return set_schib_wait(cdev, mme, 1, mba);
}
/*
 * Read a single value from the extended-format hardware block.
 * Same semantics as read_cmb(), plus the two extended-only fields.
 */
static u64 read_cmbe(struct ccw_device *cdev, int index)
{
	struct cmb_data *cmb_data;
	unsigned long flags;
	struct cmbe *cmb;
	u64 ret = 0;
	u32 val;

	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data)
		goto out;

	/* read directly from the live hardware block */
	cmb = cmb_data->hw_block;
	switch (index) {
	case avg_utilization:
		ret = __cmb_utilization(cmb->device_connect_time,
					cmb->function_pending_time,
					cmb->device_disconnect_time,
					cdev->private->cmb_start_time);
		goto out;
	case cmb_ssch_rsch_count:
		ret = cmb->ssch_rsch_count;
		goto out;
	case cmb_sample_count:
		ret = cmb->sample_count;
		goto out;
	case cmb_device_connect_time:
		val = cmb->device_connect_time;
		break;
	case cmb_function_pending_time:
		val = cmb->function_pending_time;
		break;
	case cmb_device_disconnect_time:
		val = cmb->device_disconnect_time;
		break;
	case cmb_control_unit_queuing_time:
		val = cmb->control_unit_queuing_time;
		break;
	case cmb_device_active_only_time:
		val = cmb->device_active_only_time;
		break;
	case cmb_device_busy_time:
		val = cmb->device_busy_time;
		break;
	case cmb_initial_command_response_time:
		val = cmb->initial_command_response_time;
		break;
	default:
		goto out;
	}
	ret = time_to_avg_nsec(val, cmb->sample_count);
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
/*
 * Fill @data with a snapshot of the extended-format measurement block,
 * converting all time fields to nanoseconds. Returns -EAGAIN if no
 * snapshot has been taken yet.
 */
static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data)
{
	struct cmbe *cmb;
	struct cmb_data *cmb_data;
	u64 time;
	unsigned long flags;
	int ret;

	/* make sure last_block holds a current snapshot */
	ret = cmf_cmb_copy_wait(cdev);
	if (ret < 0)
		return ret;
	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data) {
		ret = -ENODEV;
		goto out;
	}
	if (cmb_data->last_update == 0) {
		ret = -EAGAIN;
		goto out;
	}
	time = cmb_data->last_update - cdev->private->cmb_start_time;

	memset (data, 0, sizeof(struct cmbdata));

	/* we only know values before device_busy_time */
	data->size = offsetof(struct cmbdata, device_busy_time);

	data->elapsed_time = tod_to_ns(time);

	cmb = cmb_data->last_block;
	/* copy data to new structure */
	data->ssch_rsch_count = cmb->ssch_rsch_count;
	data->sample_count = cmb->sample_count;

	/* time fields are converted to nanoseconds while copying */
	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
	data->device_disconnect_time =
		time_to_nsec(cmb->device_disconnect_time);
	data->control_unit_queuing_time
		= time_to_nsec(cmb->control_unit_queuing_time);
	data->device_active_only_time
		= time_to_nsec(cmb->device_active_only_time);
	data->device_busy_time = time_to_nsec(cmb->device_busy_time);
	data->initial_command_response_time
		= time_to_nsec(cmb->initial_command_response_time);

	ret = 0;
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
/* Reset callback for the extended format; delegates to the shared logic. */
static void reset_cmbe(struct ccw_device *cdev)
{
cmf_generic_reset(cdev);
}
/* Forward declaration; the group is defined further down in this file. */
static struct attribute_group cmf_attr_group_ext;
/* Operations vector for the extended channel-measurement block format. */
static struct cmb_operations cmbops_extended = {
.alloc = alloc_cmbe,
.free = free_cmbe,
.set = set_cmbe,
.read = read_cmbe,
.readall = readall_cmbe,
.reset = reset_cmbe,
.attr_group = &cmf_attr_group_ext,
};
  849. static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
  850. {
  851. return sprintf(buf, "%lld\n",
  852. (unsigned long long) cmf_read(to_ccwdev(dev), idx));
  853. }
  854. static ssize_t cmb_show_avg_sample_interval(struct device *dev,
  855. struct device_attribute *attr,
  856. char *buf)
  857. {
  858. struct ccw_device *cdev = to_ccwdev(dev);
  859. unsigned long count;
  860. long interval;
  861. count = cmf_read(cdev, cmb_sample_count);
  862. spin_lock_irq(cdev->ccwlock);
  863. if (count) {
  864. interval = get_tod_clock() - cdev->private->cmb_start_time;
  865. interval = tod_to_ns(interval);
  866. interval /= count;
  867. } else
  868. interval = -1;
  869. spin_unlock_irq(cdev->ccwlock);
  870. return sprintf(buf, "%ld\n", interval);
  871. }
  872. static ssize_t cmb_show_avg_utilization(struct device *dev,
  873. struct device_attribute *attr,
  874. char *buf)
  875. {
  876. unsigned long u = cmf_read(to_ccwdev(dev), avg_utilization);
  877. return sprintf(buf, "%02lu.%01lu%%\n", u / 10, u % 10);
  878. }
/*
 * cmf_attr() - generate a read-only sysfs attribute "name" showing the
 * raw counter cmb_<name> via cmb_show_attr().
 */
#define cmf_attr(name) \
static ssize_t show_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ return cmb_show_attr((dev), buf, cmb_##name); } \
static DEVICE_ATTR(name, 0444, show_##name, NULL);
/*
 * cmf_attr_avg() - like cmf_attr(), but names the attribute "avg_<name>";
 * the per-sample averaging itself happens in the read callback.
 */
#define cmf_attr_avg(name) \
static ssize_t show_avg_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ return cmb_show_attr((dev), buf, cmb_##name); } \
static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);
/* Raw event counters. */
cmf_attr(ssch_rsch_count);
cmf_attr(sample_count);
/* Time counters, reported averaged over sample_count (see read path). */
cmf_attr_avg(device_connect_time);
cmf_attr_avg(function_pending_time);
cmf_attr_avg(device_disconnect_time);
cmf_attr_avg(control_unit_queuing_time);
cmf_attr_avg(device_active_only_time);
cmf_attr_avg(device_busy_time);
cmf_attr_avg(initial_command_response_time);
/* Derived attributes with dedicated show functions above. */
static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
NULL);
static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
/* Attributes exposed for the basic measurement block format. */
static struct attribute *cmf_attributes[] = {
&dev_attr_avg_sample_interval.attr,
&dev_attr_avg_utilization.attr,
&dev_attr_ssch_rsch_count.attr,
&dev_attr_sample_count.attr,
&dev_attr_avg_device_connect_time.attr,
&dev_attr_avg_function_pending_time.attr,
&dev_attr_avg_device_disconnect_time.attr,
&dev_attr_avg_control_unit_queuing_time.attr,
&dev_attr_avg_device_active_only_time.attr,
NULL,
};
/* Shown in sysfs under the "cmf" subdirectory of the device. */
static struct attribute_group cmf_attr_group = {
.name = "cmf",
.attrs = cmf_attributes,
};
/*
 * Attributes for the extended format: same as the basic set plus
 * avg_device_busy_time and avg_initial_command_response_time.
 */
static struct attribute *cmf_attributes_ext[] = {
&dev_attr_avg_sample_interval.attr,
&dev_attr_avg_utilization.attr,
&dev_attr_ssch_rsch_count.attr,
&dev_attr_sample_count.attr,
&dev_attr_avg_device_connect_time.attr,
&dev_attr_avg_function_pending_time.attr,
&dev_attr_avg_device_disconnect_time.attr,
&dev_attr_avg_control_unit_queuing_time.attr,
&dev_attr_avg_device_active_only_time.attr,
&dev_attr_avg_device_busy_time.attr,
&dev_attr_avg_initial_command_response_time.attr,
NULL,
};
/* Same sysfs directory name as the basic group; only one is ever used. */
static struct attribute_group cmf_attr_group_ext = {
.name = "cmf",
.attrs = cmf_attributes_ext,
};
  935. static ssize_t cmb_enable_show(struct device *dev,
  936. struct device_attribute *attr,
  937. char *buf)
  938. {
  939. struct ccw_device *cdev = to_ccwdev(dev);
  940. return sprintf(buf, "%d\n", cmf_enabled(cdev));
  941. }
  942. static ssize_t cmb_enable_store(struct device *dev,
  943. struct device_attribute *attr, const char *buf,
  944. size_t c)
  945. {
  946. struct ccw_device *cdev = to_ccwdev(dev);
  947. unsigned long val;
  948. int ret;
  949. ret = kstrtoul(buf, 16, &val);
  950. if (ret)
  951. return ret;
  952. switch (val) {
  953. case 0:
  954. ret = disable_cmf(cdev);
  955. break;
  956. case 1:
  957. ret = enable_cmf(cdev);
  958. break;
  959. default:
  960. ret = -EINVAL;
  961. }
  962. return ret ? ret : c;
  963. }
/* Wires cmb_enable_show()/cmb_enable_store() to the "cmb_enable" file. */
DEVICE_ATTR_RW(cmb_enable);
  965. int ccw_set_cmf(struct ccw_device *cdev, int enable)
  966. {
  967. return cmbops->set(cdev, enable ? 2 : 0);
  968. }
  969. /**
  970. * enable_cmf() - switch on the channel measurement for a specific device
  971. * @cdev: The ccw device to be enabled
  972. *
  973. * Enable channel measurements for @cdev. If this is called on a device
  974. * for which channel measurement is already enabled a reset of the
  975. * measurement data is triggered.
  976. * Returns: %0 for success or a negative error value.
  977. * Context:
  978. * non-atomic
  979. */
  980. int enable_cmf(struct ccw_device *cdev)
  981. {
  982. int ret = 0;
  983. device_lock(&cdev->dev);
  984. if (cmf_enabled(cdev)) {
  985. cmbops->reset(cdev);
  986. goto out_unlock;
  987. }
  988. get_device(&cdev->dev);
  989. ret = cmbops->alloc(cdev);
  990. if (ret)
  991. goto out;
  992. cmbops->reset(cdev);
  993. ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
  994. if (ret) {
  995. cmbops->free(cdev);
  996. goto out;
  997. }
  998. ret = cmbops->set(cdev, 2);
  999. if (ret) {
  1000. sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
  1001. cmbops->free(cdev);
  1002. }
  1003. out:
  1004. if (ret)
  1005. put_device(&cdev->dev);
  1006. out_unlock:
  1007. device_unlock(&cdev->dev);
  1008. return ret;
  1009. }
  1010. /**
  1011. * __disable_cmf() - switch off the channel measurement for a specific device
  1012. * @cdev: The ccw device to be disabled
  1013. *
  1014. * Returns: %0 for success or a negative error value.
  1015. *
  1016. * Context:
  1017. * non-atomic, device_lock() held.
  1018. */
  1019. int __disable_cmf(struct ccw_device *cdev)
  1020. {
  1021. int ret;
  1022. ret = cmbops->set(cdev, 0);
  1023. if (ret)
  1024. return ret;
  1025. sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
  1026. cmbops->free(cdev);
  1027. put_device(&cdev->dev);
  1028. return ret;
  1029. }
  1030. /**
  1031. * disable_cmf() - switch off the channel measurement for a specific device
  1032. * @cdev: The ccw device to be disabled
  1033. *
  1034. * Returns: %0 for success or a negative error value.
  1035. *
  1036. * Context:
  1037. * non-atomic
  1038. */
  1039. int disable_cmf(struct ccw_device *cdev)
  1040. {
  1041. int ret;
  1042. device_lock(&cdev->dev);
  1043. ret = __disable_cmf(cdev);
  1044. device_unlock(&cdev->dev);
  1045. return ret;
  1046. }
  1047. /**
  1048. * cmf_read() - read one value from the current channel measurement block
  1049. * @cdev: the channel to be read
  1050. * @index: the index of the value to be read
  1051. *
  1052. * Returns: The value read or %0 if the value cannot be read.
  1053. *
  1054. * Context:
  1055. * any
  1056. */
  1057. u64 cmf_read(struct ccw_device *cdev, int index)
  1058. {
  1059. return cmbops->read(cdev, index);
  1060. }
  1061. /**
  1062. * cmf_readall() - read the current channel measurement block
  1063. * @cdev: the channel to be read
  1064. * @data: a pointer to a data block that will be filled
  1065. *
  1066. * Returns: %0 on success, a negative error value otherwise.
  1067. *
  1068. * Context:
  1069. * any
  1070. */
  1071. int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
  1072. {
  1073. return cmbops->readall(cdev, data);
  1074. }
  1075. /* Reenable cmf when a disconnected device becomes available again. */
  1076. int cmf_reenable(struct ccw_device *cdev)
  1077. {
  1078. cmbops->reset(cdev);
  1079. return cmbops->set(cdev, 2);
  1080. }
  1081. /**
  1082. * cmf_reactivate() - reactivate measurement block updates
  1083. *
  1084. * Use this during resume from hibernate.
  1085. */
  1086. void cmf_reactivate(void)
  1087. {
  1088. spin_lock(&cmb_area.lock);
  1089. if (!list_empty(&cmb_area.list))
  1090. cmf_activate(cmb_area.mem, CMF_ON);
  1091. spin_unlock(&cmb_area.lock);
  1092. }
  1093. static int __init init_cmbe(void)
  1094. {
  1095. cmbe_cache = kmem_cache_create("cmbe_cache", sizeof(struct cmbe),
  1096. __alignof__(struct cmbe), 0, NULL);
  1097. return cmbe_cache ? 0 : -ENOMEM;
  1098. }
  1099. static int __init init_cmf(void)
  1100. {
  1101. char *format_string;
  1102. char *detect_string;
  1103. int ret;
  1104. /*
  1105. * If the user did not give a parameter, see if we are running on a
  1106. * machine supporting extended measurement blocks, otherwise fall back
  1107. * to basic mode.
  1108. */
  1109. if (format == CMF_AUTODETECT) {
  1110. if (!css_general_characteristics.ext_mb) {
  1111. format = CMF_BASIC;
  1112. } else {
  1113. format = CMF_EXTENDED;
  1114. }
  1115. detect_string = "autodetected";
  1116. } else {
  1117. detect_string = "parameter";
  1118. }
  1119. switch (format) {
  1120. case CMF_BASIC:
  1121. format_string = "basic";
  1122. cmbops = &cmbops_basic;
  1123. break;
  1124. case CMF_EXTENDED:
  1125. format_string = "extended";
  1126. cmbops = &cmbops_extended;
  1127. ret = init_cmbe();
  1128. if (ret)
  1129. return ret;
  1130. break;
  1131. default:
  1132. return -EINVAL;
  1133. }
  1134. pr_info("Channel measurement facility initialized using format "
  1135. "%s (mode %s)\n", format_string, detect_string);
  1136. return 0;
  1137. }
/* Run init_cmf() during the device initcall phase. */
device_initcall(init_cmf);
/* Public API used by ccw device drivers. */
EXPORT_SYMBOL_GPL(enable_cmf);
EXPORT_SYMBOL_GPL(disable_cmf);
EXPORT_SYMBOL_GPL(cmf_read);
EXPORT_SYMBOL_GPL(cmf_readall);