dimm_devs.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

/*
 * Retrieve bus and dimm handle and return if this bus supports
 * get_config_data commands
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_LABELING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}
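
/* Ensure @ndd is present and its dimm supports config-data transfer commands. */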
static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%ps: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm to initialize
 *
 * Returns: %0 if the area is already valid, -errno on error
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
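
/**
 * nvdimm_get_config_data - read from the dimm's config / label area
 * @ndd: dimm to read from
 * @buf: destination buffer for the data
 * @offset: offset into the config area to start reading
 * @len: number of bytes to read
 *
 * Reads are split into chunks no larger than the bus-reported max_xfer
 * size and issued as ND_CMD_GET_CONFIG_DATA commands.
 *
 * Returns: %0 on success, -errno otherwise
 */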
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
			   size_t offset, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	size_t max_cmd_size, buf_offset;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len;
	     len -= cmd->in_length, buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);

		cmd_size = sizeof(*cmd) + cmd->in_length;

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}

		/* out_buf should be valid, copy it into our output buffer */
		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
	}
	kvfree(cmd);

	return rc;
}
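
/**
 * nvdimm_set_config_data - write to the dimm's config / label area
 * @ndd: dimm to write to
 * @offset: offset into the config area to start writing
 * @buf: source buffer for the data
 * @len: number of bytes to write
 *
 * Writes are split into chunks no larger than the bus-reported max_xfer
 * size and issued as ND_CMD_SET_CONFIG_DATA commands.
 *
 * Returns: %0 on success, -errno otherwise
 */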
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
			   void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}
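
/*
 * Illustrative sketch (not part of this file): a label-area consumer would
 * typically size the config area before reading it back, e.g.:
 *
 *	rc = nvdimm_init_nsarea(ndd);
 *	if (rc == 0 && ndd->nsarea.config_size)
 *		rc = nvdimm_get_config_data(ndd, buf, 0,
 *				ndd->nsarea.config_size);
 *
 * where buf is a caller-provided buffer of at least config_size bytes.
 */
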
void nvdimm_set_labeling(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LABELING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_free(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s\n",
			test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing, userspace should
	 * quiesce probing if it wants a static answer
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);
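
/*
 * Report the number of label slots still available on this dimm.  One slot
 * is held back (nfree - 1), and the "nfree - 1 > nfree" test guards against
 * unsigned underflow when no slots are free at all.
 */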
static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
	struct device *dev;
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	dev = ndd->dev;
	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	ssize_t rc;

	device_lock(dev);
	rc = __available_slots_show(dev_get_drvdata(dev), buf);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(available_slots);

static ssize_t security_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * For the test version we need to poll the "hardware" in order
	 * to get the updated status for unlock testing.
	 */
	if (IS_ENABLED(CONFIG_NVDIMM_SECURITY_TEST))
		nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
		return sprintf(buf, "overwrite\n");
	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return sprintf(buf, "disabled\n");
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "unlocked\n");
	if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "locked\n");
	return -ENOTTY;
}

static ssize_t frozen_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
				&nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);

static ssize_t security_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	ssize_t rc;

	/*
	 * Require all userspace triggered security management to be
	 * done while probing is idle and the DIMM is not in active use
	 * in any region.
	 */
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = nvdimm_security_store(dev, buf, len);
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(security);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	&dev_attr_security.attr,
	&dev_attr_frozen.attr,
	NULL,
};
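
/*
 * Hide the security attributes when no security state is reported, and make
 * "security" read-only unless at least one state-mutation op is provided.
 */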
static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
		return a->mode;
	if (!nvdimm->sec.flags)
		return 0;

	if (a == &dev_attr_security.attr) {
		/* Are there any state mutation ops (make writable)? */
		if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
				|| nvdimm->sec.ops->change_key
				|| nvdimm->sec.ops->erase
				|| nvdimm->sec.ops->overwrite)
			return a->mode;
		return 0444;
	}

	if (nvdimm->sec.ops->freeze)
		return a->mode;
	return 0;
}

static const struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
	.is_visible = nvdimm_visible,
};

static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_result result;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	result = nvdimm->fw_ops->activate_result(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (result) {
	case NVDIMM_FWA_RESULT_NONE:
		return sprintf(buf, "none\n");
	case NVDIMM_FWA_RESULT_SUCCESS:
		return sprintf(buf, "success\n");
	case NVDIMM_FWA_RESULT_FAIL:
		return sprintf(buf, "fail\n");
	case NVDIMM_FWA_RESULT_NOTSTAGED:
		return sprintf(buf, "not_staged\n");
	case NVDIMM_FWA_RESULT_NEEDRESET:
		return sprintf(buf, "need_reset\n");
	default:
		return -ENXIO;
	}
}
static DEVICE_ATTR_ADMIN_RO(result);

static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_state state;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	state = nvdimm->fw_ops->activate_state(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	default:
		return -ENXIO;
	}
}

static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_trigger arg;
	int rc;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "arm"))
		arg = NVDIMM_FWA_ARM;
	else if (sysfs_streq(buf, "disarm"))
		arg = NVDIMM_FWA_DISARM;
	else
		return -EINVAL;

	nvdimm_bus_lock(dev);
	rc = nvdimm->fw_ops->arm(nvdimm, arg);
	nvdimm_bus_unlock(dev);

	if (rc < 0)
		return rc;
	return len;
}
static DEVICE_ATTR_ADMIN_RW(activate);

static struct attribute *nvdimm_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_result.attr,
	NULL,
};
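
/*
 * Expose the firmware/ attribute group only when both the bus and the dimm
 * implement firmware activation ops and the bus capability is at least
 * NVDIMM_FWA_CAP_QUIESCE.
 */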
static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return 0;
	if (!nvdimm->fw_ops)
		return 0;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	nvdimm_bus_unlock(dev);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}

static const struct attribute_group nvdimm_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_firmware_attributes,
	.is_visible = nvdimm_firmware_visible,
};

static const struct attribute_group *nvdimm_attribute_groups[] = {
	&nd_device_attribute_group,
	&nvdimm_attribute_group,
	&nvdimm_firmware_attribute_group,
	NULL,
};

static const struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
	.groups = nvdimm_attribute_groups,
};

bool is_nvdimm(const struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

static struct lock_class_key nvdimm_key;
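
/**
 * __nvdimm_create - allocate and register an nvdimm device on @nvdimm_bus
 * @nvdimm_bus: parent bus for the new dimm device
 * @provider_data: opaque pointer later returned by nvdimm_provider_data()
 * @groups: provider-specific sysfs attribute groups
 * @flags: NDD_* flags for the dimm
 * @cmd_mask: mask of supported ND_CMD_* commands
 * @num_flush: number of write-pending-queue flush resources
 * @flush_wpq: write-pending-queue flush hint resources
 * @dimm_id: provider-assigned dimm identifier
 * @sec_ops: security operations, may be NULL
 * @fw_ops: firmware activation operations, may be NULL
 *
 * Returns: the new nvdimm on success, NULL on allocation failure
 */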
struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops,
		const struct nvdimm_fw_ops *fw_ops)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_alloc(&dimm_ida, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}

	nvdimm->dimm_id = dimm_id;
	nvdimm->provider_data = provider_data;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nvdimm->sec.ops = sec_ops;
	nvdimm->fw_ops = fw_ops;
	nvdimm->sec.overwrite_tmo = 0;
	INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
	/*
	 * Security state must be initialized before device_add() for
	 * attribute visibility.
	 */
	/* get security state and extended (master) state */
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &nvdimm_key);
	if (test_bit(NDD_REGISTER_SYNC, &flags))
		nd_device_register_sync(dev);
	else
		nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);
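
/**
 * nvdimm_delete - unregister an nvdimm, cancelling any pending overwrite work
 * @nvdimm: dimm to remove
 *
 * The security state is marked frozen so that no new security operations
 * start while the device is being torn down.
 */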
void nvdimm_delete(struct nvdimm *nvdimm)
{
	struct device *dev = &nvdimm->dev;
	bool dev_put = false;

	/* We are shutting down. Make state frozen artificially. */
	nvdimm_bus_lock(dev);
	set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
	if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
		dev_put = true;
	nvdimm_bus_unlock(dev);
	cancel_delayed_work_sync(&nvdimm->dwork);
	if (dev_put)
		put_device(dev);
	nd_device_unregister(dev, ND_SYNC);
}
EXPORT_SYMBOL_GPL(nvdimm_delete);

static void shutdown_security_notify(void *data)
{
	struct nvdimm *nvdimm = data;

	sysfs_put(nvdimm->sec.overwrite_state);
}
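
/**
 * nvdimm_security_setup_events - pin the "security" sysfs dirent for notification
 * @dev: nvdimm device with overwrite support
 *
 * Grabs a reference to the "security" attribute so that overwrite completion
 * can raise a sysfs notification, and registers a devm action to drop it.
 *
 * Returns: %0 on success or if overwrite is not supported, -errno otherwise
 */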
int nvdimm_security_setup_events(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->sec.flags || !nvdimm->sec.ops
			|| !nvdimm->sec.ops->overwrite)
		return 0;
	nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
	if (!nvdimm->sec.overwrite_state)
		return -ENOMEM;

	return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);

int nvdimm_in_overwrite(struct nvdimm *nvdimm)
{
	return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);
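
/**
 * nvdimm_security_freeze - lock out further security state changes
 * @nvdimm: dimm to freeze
 *
 * Must be called with the nvdimm_bus lock held.  Fails with -EBUSY while an
 * overwrite operation is in progress.
 *
 * Returns: %0 on success, -errno otherwise
 */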
int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
	int rc;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
		return -EOPNOTSUPP;

	if (!nvdimm->sec.flags)
		return -EIO;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
		return -EBUSY;
	}

	rc = nvdimm->sec.ops->freeze(nvdimm);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	return rc;
}
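
/*
 * Per-dimm alignment for capacity provisioning: the region alignment divided
 * evenly across its mappings.  Returns 0 (and warns) on misconfiguration.
 */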
static unsigned long dpa_align(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;

	if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
				"bus lock required for capacity provision\n"))
		return 0;
	if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
				% nd_region->ndr_mappings,
				"invalid region align %#lx mappings: %d\n",
				nd_region->align, nd_region->ndr_mappings))
		return 0;
	return nd_region->align / nd_region->ndr_mappings;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 *
 * Returns: %0 if there is an alignment error, otherwise the max
 *		unallocated dpa range
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;
	unsigned long align;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		/* trim free space relative to current alignment setting */
		start = ALIGN(res->start, align);
		end = ALIGN_DOWN(res->end + 1, align) - 1;
		if (end < start)
			continue;
		if (end - start + 1 > max)
			max = end - start + 1;
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_mapping: container of dpa-resource-root + labels
 * @nd_region: constrain available space check to this reference region
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set.
 *
 * Returns: %0 if there is an alignment error, otherwise the unallocated dpa
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t map_start, map_end, busy = 0;
	struct resource *res;
	unsigned long align;

	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if (start >= map_start && start < map_end) {
			if (end > map_end) {
				nd_dbg_dpa(nd_region, ndd, res,
						"misaligned to iset\n");
				return 0;
			}
			busy += end - start + 1;
		} else if (end >= map_start && end <= map_end) {
			busy += end - start + 1;
		} else if (map_start > start && map_start < end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
		}
	}

	if (busy < nd_mapping->size)
		return ALIGN_DOWN(nd_mapping->size - busy, align);
	return 0;
}

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}
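
/**
 * nvdimm_allocate_dpa - claim a dpa range from the dimm's resource tree
 * @ndd: container of dpa-resource-root + labels
 * @label_id: resource name to record for the allocation
 * @start: first dpa of the range
 * @n: number of bytes to allocate
 *
 * Returns: the new resource on success, NULL if the range is unavailable
 */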
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form pmem-<human readable uuid>
 *
 * Returns: sum of the dpa allocated to the label_id
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;

	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}