/* fs/ceph/metric.c */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #include <linux/ceph/ceph_debug.h>
  3. #include <linux/types.h>
  4. #include <linux/percpu_counter.h>
  5. #include <linux/math64.h>
  6. #include "metric.h"
  7. #include "mds_client.h"
  8. static void ktime_to_ceph_timespec(struct ceph_timespec *ts, ktime_t val)
  9. {
  10. struct timespec64 t = ktime_to_timespec64(val);
  11. ceph_encode_timespec64(ts, &t);
  12. }
/*
 * ceph_mdsc_send_metrics - encode and send one CEPH_MSG_CLIENT_METRICS
 * message to the given MDS session.
 *
 * The message body is a struct ceph_metric_head (record count) followed by
 * a fixed sequence of metric records laid out back to back: cap, read
 * latency, write latency, metadata latency, dentry lease, opened files,
 * pinned icaps, opened inodes, read io sizes, write io sizes.
 *
 * Returns true when the message was handed to the connection layer, false
 * when the MDS rank is not active yet or message allocation failed.
 *
 * NOTE(review): the per-op latency fields (m->metric[...]) are read here
 * without taking m->metric[i].lock, so the snapshot is best-effort —
 * presumably acceptable for statistics; confirm against the updater side.
 */
static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *s)
{
	struct ceph_metric_head *head;
	struct ceph_metric_cap *cap;
	struct ceph_metric_read_latency *read;
	struct ceph_metric_write_latency *write;
	struct ceph_metric_metadata_latency *meta;
	struct ceph_metric_dlease *dlease;
	struct ceph_opened_files *files;
	struct ceph_pinned_icaps *icaps;
	struct ceph_opened_inodes *inodes;
	struct ceph_read_io_size *rsize;
	struct ceph_write_io_size *wsize;
	struct ceph_client_metric *m = &mdsc->metric;
	u64 nr_caps = atomic64_read(&m->total_caps);
	u32 header_len = sizeof(struct ceph_metric_header);
	struct ceph_client *cl = mdsc->fsc->client;
	struct ceph_msg *msg;
	s64 sum;
	s32 items = 0;
	s32 len;

	/* Do not send the metrics until the MDS rank is ready */
	mutex_lock(&mdsc->mutex);
	if (ceph_mdsmap_get_state(mdsc->mdsmap, s->s_mds) != CEPH_MDS_STATE_ACTIVE) {
		mutex_unlock(&mdsc->mutex);
		return false;
	}
	mutex_unlock(&mdsc->mutex);

	/* total payload: head plus every record, in the order encoded below */
	len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write)
	      + sizeof(*meta) + sizeof(*dlease) + sizeof(*files)
	      + sizeof(*icaps) + sizeof(*inodes) + sizeof(*rsize)
	      + sizeof(*wsize);

	msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true);
	if (!msg) {
		pr_err_client(cl, "to mds%d, failed to allocate message\n",
			      s->s_mds);
		return false;
	}

	head = msg->front.iov_base;

	/* encode the cap metric */
	cap = (struct ceph_metric_cap *)(head + 1);
	cap->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_CAP_INFO);
	cap->header.ver = 1;
	cap->header.compat = 1;
	/* data_len excludes the header itself */
	cap->header.data_len = cpu_to_le32(sizeof(*cap) - header_len);
	cap->hit = cpu_to_le64(percpu_counter_sum(&m->i_caps_hit));
	cap->mis = cpu_to_le64(percpu_counter_sum(&m->i_caps_mis));
	cap->total = cpu_to_le64(nr_caps);
	items++;

	/* encode the read latency metric */
	read = (struct ceph_metric_read_latency *)(cap + 1);
	read->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_LATENCY);
	read->header.ver = 2;
	read->header.compat = 1;
	read->header.data_len = cpu_to_le32(sizeof(*read) - header_len);
	sum = m->metric[METRIC_READ].latency_sum;
	ktime_to_ceph_timespec(&read->lat, sum);
	ktime_to_ceph_timespec(&read->avg, m->metric[METRIC_READ].latency_avg);
	read->sq_sum = cpu_to_le64(m->metric[METRIC_READ].latency_sq_sum);
	read->count = cpu_to_le64(m->metric[METRIC_READ].total);
	items++;

	/* encode the write latency metric */
	write = (struct ceph_metric_write_latency *)(read + 1);
	write->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_LATENCY);
	write->header.ver = 2;
	write->header.compat = 1;
	write->header.data_len = cpu_to_le32(sizeof(*write) - header_len);
	sum = m->metric[METRIC_WRITE].latency_sum;
	ktime_to_ceph_timespec(&write->lat, sum);
	ktime_to_ceph_timespec(&write->avg, m->metric[METRIC_WRITE].latency_avg);
	write->sq_sum = cpu_to_le64(m->metric[METRIC_WRITE].latency_sq_sum);
	write->count = cpu_to_le64(m->metric[METRIC_WRITE].total);
	items++;

	/* encode the metadata latency metric */
	meta = (struct ceph_metric_metadata_latency *)(write + 1);
	meta->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_METADATA_LATENCY);
	meta->header.ver = 2;
	meta->header.compat = 1;
	meta->header.data_len = cpu_to_le32(sizeof(*meta) - header_len);
	sum = m->metric[METRIC_METADATA].latency_sum;
	ktime_to_ceph_timespec(&meta->lat, sum);
	ktime_to_ceph_timespec(&meta->avg, m->metric[METRIC_METADATA].latency_avg);
	meta->sq_sum = cpu_to_le64(m->metric[METRIC_METADATA].latency_sq_sum);
	meta->count = cpu_to_le64(m->metric[METRIC_METADATA].total);
	items++;

	/* encode the dentry lease metric */
	dlease = (struct ceph_metric_dlease *)(meta + 1);
	dlease->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_DENTRY_LEASE);
	dlease->header.ver = 1;
	dlease->header.compat = 1;
	dlease->header.data_len = cpu_to_le32(sizeof(*dlease) - header_len);
	dlease->hit = cpu_to_le64(percpu_counter_sum(&m->d_lease_hit));
	dlease->mis = cpu_to_le64(percpu_counter_sum(&m->d_lease_mis));
	dlease->total = cpu_to_le64(atomic64_read(&m->total_dentries));
	items++;

	/* shared "total inodes" value for the three records below */
	sum = percpu_counter_sum(&m->total_inodes);

	/* encode the opened files metric */
	files = (struct ceph_opened_files *)(dlease + 1);
	files->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_FILES);
	files->header.ver = 1;
	files->header.compat = 1;
	files->header.data_len = cpu_to_le32(sizeof(*files) - header_len);
	files->opened_files = cpu_to_le64(atomic64_read(&m->opened_files));
	files->total = cpu_to_le64(sum);
	items++;

	/* encode the pinned icaps metric */
	icaps = (struct ceph_pinned_icaps *)(files + 1);
	icaps->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_PINNED_ICAPS);
	icaps->header.ver = 1;
	icaps->header.compat = 1;
	icaps->header.data_len = cpu_to_le32(sizeof(*icaps) - header_len);
	icaps->pinned_icaps = cpu_to_le64(nr_caps);
	icaps->total = cpu_to_le64(sum);
	items++;

	/* encode the opened inodes metric */
	inodes = (struct ceph_opened_inodes *)(icaps + 1);
	inodes->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_OPENED_INODES);
	inodes->header.ver = 1;
	inodes->header.compat = 1;
	inodes->header.data_len = cpu_to_le32(sizeof(*inodes) - header_len);
	inodes->opened_inodes = cpu_to_le64(percpu_counter_sum(&m->opened_inodes));
	inodes->total = cpu_to_le64(sum);
	items++;

	/* encode the read io size metric */
	rsize = (struct ceph_read_io_size *)(inodes + 1);
	rsize->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_IO_SIZES);
	rsize->header.ver = 1;
	rsize->header.compat = 1;
	rsize->header.data_len = cpu_to_le32(sizeof(*rsize) - header_len);
	rsize->total_ops = cpu_to_le64(m->metric[METRIC_READ].total);
	rsize->total_size = cpu_to_le64(m->metric[METRIC_READ].size_sum);
	items++;

	/* encode the write io size metric */
	wsize = (struct ceph_write_io_size *)(rsize + 1);
	wsize->header.type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_IO_SIZES);
	wsize->header.ver = 1;
	wsize->header.compat = 1;
	wsize->header.data_len = cpu_to_le32(sizeof(*wsize) - header_len);
	wsize->total_ops = cpu_to_le64(m->metric[METRIC_WRITE].total);
	wsize->total_size = cpu_to_le64(m->metric[METRIC_WRITE].size_sum);
	items++;

	/* head->num may be unaligned in the message buffer */
	put_unaligned_le32(items, &head->num);
	msg->front.iov_len = len;
	msg->hdr.version = cpu_to_le16(1);
	msg->hdr.compat_version = cpu_to_le16(1);
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	ceph_con_send(&s->s_con, msg);

	return true;
}
/*
 * metric_get_session - find and cache an MDS session usable for metrics.
 *
 * Scans the session array for the first open session whose MDS advertises
 * CEPHFS_FEATURE_METRIC_COLLECT. On a match, the reference returned by
 * __ceph_lookup_mds_session() is transferred to mdsc->metric.session (no
 * put here); on a mismatch the reference is dropped and the scan goes on.
 * If nothing matches, mdsc->metric.session is left untouched.
 */
static void metric_get_session(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *s;
	int i;

	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;

		/*
		 * Skip it if MDS doesn't support the metric collection,
		 * or the MDS will close the session's socket connection
		 * directly when it get this message.
		 */
		if (check_session_state(s) &&
		    test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &s->s_features)) {
			mdsc->metric.session = s;	/* keep the lookup ref */
			break;
		}

		ceph_put_mds_session(s);
	}
	mutex_unlock(&mdsc->mutex);
}
/*
 * metric_delayed_work - periodic worker that ships metrics to the MDS.
 *
 * Returns early (and does NOT re-arm itself) when the client is stopping
 * or sending is disabled via the disable_send_metrics knob. Otherwise it
 * replaces a dead cached session if needed, sends the current metrics and
 * schedules the next run. If no metrics-capable session can be found, the
 * work is not rescheduled here.
 */
static void metric_delayed_work(struct work_struct *work)
{
	struct ceph_client_metric *m =
		container_of(work, struct ceph_client_metric, delayed_work.work);
	struct ceph_mds_client *mdsc =
		container_of(m, struct ceph_mds_client, metric);

	if (mdsc->stopping || disable_send_metrics)
		return;

	/* drop a cached session that went bad and try to pick a new one */
	if (!m->session || !check_session_state(m->session)) {
		if (m->session) {
			ceph_put_mds_session(m->session);
			m->session = NULL;
		}
		metric_get_session(mdsc);
	}
	if (m->session) {
		ceph_mdsc_send_metrics(mdsc, m->session);
		metric_schedule_delayed(m);
	}
}
/*
 * ceph_metric_init - initialize all client-side metric state.
 * @m: the metric structure embedded in struct ceph_mds_client
 *
 * Sets up the per-cpu counters, the per-op-type latency/size trackers and
 * the delayed work used to push metrics to the MDS.
 *
 * Returns 0 on success or a negative errno from percpu_counter_init(); on
 * failure the goto ladder tears down exactly the counters that were
 * already initialized (each label is named after the init that failed).
 */
int ceph_metric_init(struct ceph_client_metric *m)
{
	struct ceph_metric *metric;
	int ret, i;

	if (!m)
		return -EINVAL;

	atomic64_set(&m->total_dentries, 0);
	ret = percpu_counter_init(&m->d_lease_hit, 0, GFP_KERNEL);
	if (ret)
		return ret;

	ret = percpu_counter_init(&m->d_lease_mis, 0, GFP_KERNEL);
	if (ret)
		goto err_d_lease_mis;

	atomic64_set(&m->total_caps, 0);
	ret = percpu_counter_init(&m->i_caps_hit, 0, GFP_KERNEL);
	if (ret)
		goto err_i_caps_hit;

	ret = percpu_counter_init(&m->i_caps_mis, 0, GFP_KERNEL);
	if (ret)
		goto err_i_caps_mis;

	/* per-op-type (read/write/metadata) latency and size trackers */
	for (i = 0; i < METRIC_MAX; i++) {
		metric = &m->metric[i];
		spin_lock_init(&metric->lock);
		metric->size_sum = 0;
		/* min trackers start at the max value so the first sample wins */
		metric->size_min = U64_MAX;
		metric->size_max = 0;
		metric->total = 0;
		metric->latency_sum = 0;
		metric->latency_avg = 0;
		metric->latency_sq_sum = 0;
		metric->latency_min = KTIME_MAX;
		metric->latency_max = 0;
	}

	atomic64_set(&m->opened_files, 0);
	ret = percpu_counter_init(&m->opened_inodes, 0, GFP_KERNEL);
	if (ret)
		goto err_opened_inodes;
	ret = percpu_counter_init(&m->total_inodes, 0, GFP_KERNEL);
	if (ret)
		goto err_total_inodes;

	m->session = NULL;
	INIT_DELAYED_WORK(&m->delayed_work, metric_delayed_work);

	return 0;

	/* unwind in reverse order of initialization */
err_total_inodes:
	percpu_counter_destroy(&m->opened_inodes);
err_opened_inodes:
	percpu_counter_destroy(&m->i_caps_mis);
err_i_caps_mis:
	percpu_counter_destroy(&m->i_caps_hit);
err_i_caps_hit:
	percpu_counter_destroy(&m->d_lease_mis);
err_d_lease_mis:
	percpu_counter_destroy(&m->d_lease_hit);
	return ret;
}
  261. void ceph_metric_destroy(struct ceph_client_metric *m)
  262. {
  263. if (!m)
  264. return;
  265. cancel_delayed_work_sync(&m->delayed_work);
  266. percpu_counter_destroy(&m->total_inodes);
  267. percpu_counter_destroy(&m->opened_inodes);
  268. percpu_counter_destroy(&m->i_caps_mis);
  269. percpu_counter_destroy(&m->i_caps_hit);
  270. percpu_counter_destroy(&m->d_lease_mis);
  271. percpu_counter_destroy(&m->d_lease_hit);
  272. ceph_put_mds_session(m->session);
  273. }
  274. #define METRIC_UPDATE_MIN_MAX(min, max, new) \
  275. { \
  276. if (unlikely(new < min)) \
  277. min = new; \
  278. if (unlikely(new > max)) \
  279. max = new; \
  280. }
/*
 * __update_mean_and_stdev - fold one latency sample into the running
 * average and the running sum of squared deviations (Welford's online
 * algorithm, so no per-sample history is needed).
 * @total:   sample count including this sample
 * @lavg:    in/out running average
 * @sq_sump: in/out accumulated sum of squared deviations
 * @lat:     the new latency sample
 *
 * The only caller in this file, ceph_update_metrics(), holds m->lock
 * around this update; the statement order below matters because sq_sum
 * must combine the old and the new average.
 */
static inline void __update_mean_and_stdev(ktime_t total, ktime_t *lavg,
					   ktime_t *sq_sump, ktime_t lat)
{
	ktime_t avg;

	if (unlikely(total == 1)) {
		/* first sample: the average is the sample itself */
		*lavg = lat;
	} else {
		/* the sq is (lat - old_avg) * (lat - new_avg) */
		avg = *lavg + div64_s64(lat - *lavg, total);
		*sq_sump += (lat - *lavg)*(lat - avg);
		*lavg = avg;
	}
}
  294. void ceph_update_metrics(struct ceph_metric *m,
  295. ktime_t r_start, ktime_t r_end,
  296. unsigned int size, int rc)
  297. {
  298. ktime_t lat = ktime_sub(r_end, r_start);
  299. ktime_t total;
  300. if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
  301. return;
  302. spin_lock(&m->lock);
  303. total = ++m->total;
  304. m->size_sum += size;
  305. METRIC_UPDATE_MIN_MAX(m->size_min, m->size_max, size);
  306. m->latency_sum += lat;
  307. METRIC_UPDATE_MIN_MAX(m->latency_min, m->latency_max, lat);
  308. __update_mean_and_stdev(total, &m->latency_avg, &m->latency_sq_sum,
  309. lat);
  310. spin_unlock(&m->lock);
  311. }