core.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
  4. */
  5. #include <linux/libnvdimm.h>
  6. #include <linux/suspend.h>
  7. #include <linux/export.h>
  8. #include <linux/module.h>
  9. #include <linux/blkdev.h>
  10. #include <linux/device.h>
  11. #include <linux/ctype.h>
  12. #include <linux/ndctl.h>
  13. #include <linux/mutex.h>
  14. #include <linux/slab.h>
  15. #include <linux/io.h>
  16. #include "nd-core.h"
  17. #include "nd.h"
/* Global registry of every registered nvdimm bus. */
LIST_HEAD(nvdimm_bus_list);
/* Serializes additions/removals on nvdimm_bus_list. */
DEFINE_MUTEX(nvdimm_bus_list_mutex);
  20. void nvdimm_bus_lock(struct device *dev)
  21. {
  22. struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
  23. if (!nvdimm_bus)
  24. return;
  25. mutex_lock(&nvdimm_bus->reconfig_mutex);
  26. }
  27. EXPORT_SYMBOL(nvdimm_bus_lock);
  28. void nvdimm_bus_unlock(struct device *dev)
  29. {
  30. struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
  31. if (!nvdimm_bus)
  32. return;
  33. mutex_unlock(&nvdimm_bus->reconfig_mutex);
  34. }
  35. EXPORT_SYMBOL(nvdimm_bus_unlock);
  36. bool is_nvdimm_bus_locked(struct device *dev)
  37. {
  38. struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
  39. if (!nvdimm_bus)
  40. return false;
  41. return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
  42. }
  43. EXPORT_SYMBOL(is_nvdimm_bus_locked);
/*
 * struct nvdimm_map - kref-counted mapping of a physical range that is
 * shared across regions on a single nvdimm bus (see devm_nvdimm_memremap()).
 */
struct nvdimm_map {
	struct nvdimm_bus *nvdimm_bus;	/* owning bus */
	struct list_head list;		/* entry on nvdimm_bus->mapping_list */
	resource_size_t offset;		/* physical base address (lookup key) */
	unsigned long flags;		/* memremap flags; 0 selects ioremap */
	size_t size;			/* mapping length in bytes */
	union {
		void *mem;		/* set when flags != 0 (memremap) */
		void __iomem *iomem;	/* set when flags == 0 (ioremap) */
	};
	struct kref kref;		/* shared-mapping reference count */
};
  56. static struct nvdimm_map *find_nvdimm_map(struct device *dev,
  57. resource_size_t offset)
  58. {
  59. struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
  60. struct nvdimm_map *nvdimm_map;
  61. list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
  62. if (nvdimm_map->offset == offset)
  63. return nvdimm_map;
  64. return NULL;
  65. }
  66. static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
  67. resource_size_t offset, size_t size, unsigned long flags)
  68. {
  69. struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
  70. struct nvdimm_map *nvdimm_map;
  71. nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
  72. if (!nvdimm_map)
  73. return NULL;
  74. INIT_LIST_HEAD(&nvdimm_map->list);
  75. nvdimm_map->nvdimm_bus = nvdimm_bus;
  76. nvdimm_map->offset = offset;
  77. nvdimm_map->flags = flags;
  78. nvdimm_map->size = size;
  79. kref_init(&nvdimm_map->kref);
  80. if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
  81. dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
  82. &offset, size, dev_name(dev));
  83. goto err_request_region;
  84. }
  85. if (flags)
  86. nvdimm_map->mem = memremap(offset, size, flags);
  87. else
  88. nvdimm_map->iomem = ioremap(offset, size);
  89. if (!nvdimm_map->mem)
  90. goto err_map;
  91. dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
  92. __func__);
  93. list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);
  94. return nvdimm_map;
  95. err_map:
  96. release_mem_region(offset, size);
  97. err_request_region:
  98. kfree(nvdimm_map);
  99. return NULL;
  100. }
  101. static void nvdimm_map_release(struct kref *kref)
  102. {
  103. struct nvdimm_bus *nvdimm_bus;
  104. struct nvdimm_map *nvdimm_map;
  105. nvdimm_map = container_of(kref, struct nvdimm_map, kref);
  106. nvdimm_bus = nvdimm_map->nvdimm_bus;
  107. dev_dbg(&nvdimm_bus->dev, "%pa\n", &nvdimm_map->offset);
  108. list_del(&nvdimm_map->list);
  109. if (nvdimm_map->flags)
  110. memunmap(nvdimm_map->mem);
  111. else
  112. iounmap(nvdimm_map->iomem);
  113. release_mem_region(nvdimm_map->offset, nvdimm_map->size);
  114. kfree(nvdimm_map);
  115. }
  116. static void nvdimm_map_put(void *data)
  117. {
  118. struct nvdimm_map *nvdimm_map = data;
  119. struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;
  120. nvdimm_bus_lock(&nvdimm_bus->dev);
  121. kref_put(&nvdimm_map->kref, nvdimm_map_release);
  122. nvdimm_bus_unlock(&nvdimm_bus->dev);
  123. }
  124. /**
  125. * devm_nvdimm_memremap - map a resource that is shared across regions
  126. * @dev: device that will own a reference to the shared mapping
  127. * @offset: physical base address of the mapping
  128. * @size: mapping size
  129. * @flags: memremap flags, or, if zero, perform an ioremap instead
  130. */
  131. void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
  132. size_t size, unsigned long flags)
  133. {
  134. struct nvdimm_map *nvdimm_map;
  135. nvdimm_bus_lock(dev);
  136. nvdimm_map = find_nvdimm_map(dev, offset);
  137. if (!nvdimm_map)
  138. nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
  139. else
  140. kref_get(&nvdimm_map->kref);
  141. nvdimm_bus_unlock(dev);
  142. if (!nvdimm_map)
  143. return NULL;
  144. if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
  145. return NULL;
  146. return nvdimm_map->mem;
  147. }
  148. EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);
  149. u64 nd_fletcher64(void *addr, size_t len, bool le)
  150. {
  151. u32 *buf = addr;
  152. u32 lo32 = 0;
  153. u64 hi32 = 0;
  154. int i;
  155. for (i = 0; i < len / sizeof(u32); i++) {
  156. lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
  157. hi32 += lo32;
  158. }
  159. return hi32 << 32 | lo32;
  160. }
  161. EXPORT_SYMBOL_GPL(nd_fletcher64);
  162. struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
  163. {
  164. /* struct nvdimm_bus definition is private to libnvdimm */
  165. return nvdimm_bus->nd_desc;
  166. }
  167. EXPORT_SYMBOL_GPL(to_nd_desc);
  168. struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
  169. {
  170. /* struct nvdimm_bus definition is private to libnvdimm */
  171. return &nvdimm_bus->dev;
  172. }
  173. EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);
  174. /**
  175. * nd_uuid_store: common implementation for writing 'uuid' sysfs attributes
  176. * @dev: container device for the uuid property
  177. * @uuid_out: uuid buffer to replace
  178. * @buf: raw sysfs buffer to parse
  179. *
  180. * Enforce that uuids can only be changed while the device is disabled
  181. * (driver detached)
  182. * LOCKING: expects device_lock() is held on entry
  183. */
  184. int nd_uuid_store(struct device *dev, uuid_t **uuid_out, const char *buf,
  185. size_t len)
  186. {
  187. uuid_t uuid;
  188. int rc;
  189. if (dev->driver)
  190. return -EBUSY;
  191. rc = uuid_parse(buf, &uuid);
  192. if (rc)
  193. return rc;
  194. kfree(*uuid_out);
  195. *uuid_out = kmemdup(&uuid, sizeof(uuid), GFP_KERNEL);
  196. if (!(*uuid_out))
  197. return -ENOMEM;
  198. return 0;
  199. }
  200. ssize_t nd_size_select_show(unsigned long current_size,
  201. const unsigned long *supported, char *buf)
  202. {
  203. ssize_t len = 0;
  204. int i;
  205. for (i = 0; supported[i]; i++)
  206. if (current_size == supported[i])
  207. len += sprintf(buf + len, "[%ld] ", supported[i]);
  208. else
  209. len += sprintf(buf + len, "%ld ", supported[i]);
  210. len += sprintf(buf + len, "\n");
  211. return len;
  212. }
  213. ssize_t nd_size_select_store(struct device *dev, const char *buf,
  214. unsigned long *current_size, const unsigned long *supported)
  215. {
  216. unsigned long lbasize;
  217. int rc, i;
  218. if (dev->driver)
  219. return -EBUSY;
  220. rc = kstrtoul(buf, 0, &lbasize);
  221. if (rc)
  222. return rc;
  223. for (i = 0; supported[i]; i++)
  224. if (lbasize == supported[i])
  225. break;
  226. if (supported[i]) {
  227. *current_size = lbasize;
  228. return 0;
  229. } else {
  230. return -EINVAL;
  231. }
  232. }
  233. static ssize_t commands_show(struct device *dev,
  234. struct device_attribute *attr, char *buf)
  235. {
  236. int cmd, len = 0;
  237. struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
  238. struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
  239. for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
  240. len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
  241. len += sprintf(buf + len, "\n");
  242. return len;
  243. }
  244. static DEVICE_ATTR_RO(commands);
  245. static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
  246. {
  247. struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
  248. struct device *parent = nvdimm_bus->dev.parent;
  249. if (nd_desc->provider_name)
  250. return nd_desc->provider_name;
  251. else if (parent)
  252. return dev_name(parent);
  253. else
  254. return "unknown";
  255. }
  256. static ssize_t provider_show(struct device *dev,
  257. struct device_attribute *attr, char *buf)
  258. {
  259. struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
  260. return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
  261. }
  262. static DEVICE_ATTR_RO(provider);
/* Cycle the device lock so any in-flight probe/remove of @dev completes. */
static int flush_namespaces(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	return 0;
}
/* Flush probing of a region/dimm device, then of each of its children. */
static int flush_regions_dimms(struct device *dev, void *data)
{
	/* wait for any active probe/remove of this device to finish */
	device_lock(dev);
	device_unlock(dev);
	device_for_each_child(dev, NULL, flush_namespaces);
	return 0;
}
/*
 * sysfs: reading 'wait_probe' blocks until outstanding device probing on
 * this bus has settled, then returns "1".
 */
static ssize_t wait_probe_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc;

	/* let the provider finish any deferred scanning first */
	if (nd_desc->flush_probe) {
		rc = nd_desc->flush_probe(nd_desc);
		if (rc)
			return rc;
	}
	nd_synchronize();
	/* then flush every region/dimm and namespace under the bus */
	device_for_each_child(dev, NULL, flush_regions_dimms);
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);
/* Default sysfs attributes present on every nvdimm bus device. */
static struct attribute *nvdimm_bus_attributes[] = {
	&dev_attr_commands.attr,
	&dev_attr_wait_probe.attr,
	&dev_attr_provider.attr,
	NULL,
};

static const struct attribute_group nvdimm_bus_attribute_group = {
	.attrs = nvdimm_bus_attributes,
};
  301. static ssize_t capability_show(struct device *dev,
  302. struct device_attribute *attr, char *buf)
  303. {
  304. struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
  305. struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
  306. enum nvdimm_fwa_capability cap;
  307. if (!nd_desc->fw_ops)
  308. return -EOPNOTSUPP;
  309. cap = nd_desc->fw_ops->capability(nd_desc);
  310. switch (cap) {
  311. case NVDIMM_FWA_CAP_QUIESCE:
  312. return sprintf(buf, "quiesce\n");
  313. case NVDIMM_FWA_CAP_LIVE:
  314. return sprintf(buf, "live\n");
  315. default:
  316. return -EOPNOTSUPP;
  317. }
  318. }
  319. static DEVICE_ATTR_RO(capability);
  320. static ssize_t activate_show(struct device *dev,
  321. struct device_attribute *attr, char *buf)
  322. {
  323. struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
  324. struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
  325. enum nvdimm_fwa_capability cap;
  326. enum nvdimm_fwa_state state;
  327. if (!nd_desc->fw_ops)
  328. return -EOPNOTSUPP;
  329. cap = nd_desc->fw_ops->capability(nd_desc);
  330. state = nd_desc->fw_ops->activate_state(nd_desc);
  331. if (cap < NVDIMM_FWA_CAP_QUIESCE)
  332. return -EOPNOTSUPP;
  333. switch (state) {
  334. case NVDIMM_FWA_IDLE:
  335. return sprintf(buf, "idle\n");
  336. case NVDIMM_FWA_BUSY:
  337. return sprintf(buf, "busy\n");
  338. case NVDIMM_FWA_ARMED:
  339. return sprintf(buf, "armed\n");
  340. case NVDIMM_FWA_ARM_OVERFLOW:
  341. return sprintf(buf, "overflow\n");
  342. default:
  343. return -ENXIO;
  344. }
  345. }
  346. static int exec_firmware_activate(void *data)
  347. {
  348. struct nvdimm_bus_descriptor *nd_desc = data;
  349. return nd_desc->fw_ops->activate(nd_desc);
  350. }
/*
 * sysfs: trigger firmware activation.  Accepts "live" (activate
 * immediately) or "quiesce" (activate from within
 * hibernate_quiet_exec() so the system is quiesced first).
 */
static ssize_t activate_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	enum nvdimm_fwa_state state;
	bool quiesce;
	ssize_t rc;

	if (!nd_desc->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "live"))
		quiesce = false;
	else if (sysfs_streq(buf, "quiesce"))
		quiesce = true;
	else
		return -EINVAL;

	state = nd_desc->fw_ops->activate_state(nd_desc);
	switch (state) {
	case NVDIMM_FWA_BUSY:
		rc = -EBUSY;
		break;
	case NVDIMM_FWA_ARMED:
	case NVDIMM_FWA_ARM_OVERFLOW:
		if (quiesce)
			rc = hibernate_quiet_exec(exec_firmware_activate, nd_desc);
		else
			rc = nd_desc->fw_ops->activate(nd_desc);
		break;
	case NVDIMM_FWA_IDLE:
	default:
		/* nothing is armed, so there is nothing to activate */
		rc = -ENXIO;
	}

	/* a successful store consumes the entire write */
	if (rc == 0)
		rc = len;
	return rc;
}
static DEVICE_ATTR_ADMIN_RW(activate);
  388. static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
  389. {
  390. struct device *dev = container_of(kobj, typeof(*dev), kobj);
  391. struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
  392. struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
  393. enum nvdimm_fwa_capability cap;
  394. /*
  395. * Both 'activate' and 'capability' disappear when no ops
  396. * detected, or a negative capability is indicated.
  397. */
  398. if (!nd_desc->fw_ops)
  399. return 0;
  400. cap = nd_desc->fw_ops->capability(nd_desc);
  401. if (cap < NVDIMM_FWA_CAP_QUIESCE)
  402. return 0;
  403. return a->mode;
  404. }
/* Attributes published under the bus device's 'firmware/' directory. */
static struct attribute *nvdimm_bus_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_capability.attr,
	NULL,
};

static const struct attribute_group nvdimm_bus_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_bus_firmware_attributes,
	.is_visible = nvdimm_bus_firmware_visible,
};

/* All sysfs attribute groups registered for an nvdimm bus device. */
const struct attribute_group *nvdimm_bus_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&nvdimm_bus_firmware_attribute_group,
	NULL,
};
/* Record a bad (poisoned) address range against @nvdimm_bus. */
int nvdimm_bus_add_badrange(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	return badrange_add(&nvdimm_bus->badrange, addr, length);
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_badrange);
/* Module init: bring up the bus, dimm, and region sub-layers in order. */
static __init int libnvdimm_init(void)
{
	int rc;

	rc = nvdimm_bus_init();
	if (rc)
		return rc;
	rc = nvdimm_init();
	if (rc)
		goto err_dimm;
	rc = nd_region_init();
	if (rc)
		goto err_region;
	/* NOTE(review): nd_label_init() result is ignored — confirm intended */
	nd_label_init();
	return 0;
	/* unwind in reverse of the init order above */
 err_region:
	nvdimm_exit();
 err_dimm:
	nvdimm_bus_exit();
	return rc;
}
/* Module exit: tear down sub-layers in reverse of init order. */
static __exit void libnvdimm_exit(void)
{
	/* every bus should already be unregistered by its provider */
	WARN_ON(!list_empty(&nvdimm_bus_list));
	nd_region_exit();
	nvdimm_exit();
	nvdimm_bus_exit();
	nvdimm_devs_exit();
}
MODULE_DESCRIPTION("NVDIMM (Non-Volatile Memory Device) core");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
/* subsys_initcall: the core must be ready before provider drivers probe */
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);