list_lru.c

/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);

static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

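/*
 * When kmem accounting is enabled, each list_lru_node additionally carries
 * an array of per-memcg lists (memcg_lrus), indexed by memcg_cache_id().
 * The helpers below pick the right list_lru_one for a given memcg index or
 * for the memcg owning a given kmem object; with !CONFIG_MEMCG (or SLOB)
 * they all fall back to the single per-node list.
 */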
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return lru->memcg_aware;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	/*
	 * The lock protects the array of per cgroup lists from relocation
	 * (see memcg_update_list_lru_node).
	 */
	lockdep_assert_held(&nlru->lock);
	if (nlru->memcg_lrus && idx >= 0)
		return nlru->memcg_lrus->lru[idx];

	return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	struct mem_cgroup *memcg;

	if (!nlru->memcg_lrus)
		return &nlru->lru;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		return &nlru->lru;

	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

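/*
 * list_lru_add() / list_lru_del() link or unlink @item on the list of the
 * NUMA node its backing page lives on and, for memcg-aware lrus, on the
 * list of the mem_cgroup that owns that page.  Both return true if they
 * changed the list and false if the item was already in the expected state
 * (already linked for add, already unlinked for del).
 */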
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_add_tail(item, &l->list);
		l->nr_items++;
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);

bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);

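/*
 * The isolate helpers are meant to be called from a walk callback, which
 * runs with the per-node lock held: they unlink (or move) @item and only
 * adjust the per-list counter; the node-level counter is updated by
 * __list_lru_walk_one() when the callback reports LRU_REMOVED*.
 */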
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);

static unsigned long __list_lru_count_one(struct list_lru *lru,
					  int nid, int memcg_idx)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
	count = l->nr_items;
	spin_unlock(&nlru->lock);

	return count;
}

unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	return __list_lru_count_one(lru, nid, memcg_cache_id(memcg));
}
EXPORT_SYMBOL_GPL(list_lru_count_one);

unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);

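/*
 * The walk machinery below invokes @isolate for each item while holding the
 * per-node lock.  The callback reports what it did via enum lru_status; the
 * LRU_REMOVED_RETRY and LRU_RETRY cases signal that the lock was dropped and
 * retaken, so the traversal has to restart from the head of the list.
 */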
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}

unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);

unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
						isolate, cb_arg, nr_to_walk);
			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);

static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

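/*
 * Management of the per-memcg list arrays: each array holds one list_lru_one
 * per memcg cache id.  When the id space grows, every array is reallocated
 * and the larger copy is published under the per-node lock (see
 * memcg_update_list_lru_node()), which is what the readers in
 * list_lru_from_memcg_idx() rely on.
 */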
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}

static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	int size = memcg_nr_cache_ids;

	nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
	if (!nlru->memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) {
		kfree(nlru->memcg_lrus);
		return -ENOMEM;
	}

	return 0;
}

static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids);
	kfree(nlru->memcg_lrus);
}

static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = nlru->memcg_lrus;
	new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kfree(new);
		return -ENOMEM;
	}

	memcpy(new, old, old_size * sizeof(void *));

	/*
	 * The lock guarantees that we won't race with a reader
	 * (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	nlru->memcg_lrus = new;
	spin_unlock_irq(&nlru->lock);

	kfree(old);
	return 0;
}

static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	/*
	 * Do not bother shrinking the array back to the old size, because we
	 * cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(nlru->memcg_lrus, old_size, new_size);
}

static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	lru->memcg_aware = memcg_aware;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}

static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}

static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}

int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}

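/*
 * Draining splices every item queued under @src_idx onto the list at
 * @dst_idx, on each node of every registered lru, leaving the source lists
 * empty and transferring their item counts.
 */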
static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
				      int src_idx, int dst_idx)
{
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	dst->nr_items += src->nr_items;
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}

static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, int dst_idx)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}

void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_idx);
	mutex_unlock(&list_lrus_mutex);
}

#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

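/*
 * __list_lru_init() allocates an array of nr_node_ids list_lru_node
 * structures and, for memcg-aware lrus, the per-memcg arrays as well.
 * The init is bracketed by memcg_get_cache_ids()/memcg_put_cache_ids()
 * so that the memcg cache id space stays stable while the arrays are
 * being sized.
 */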
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);

void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);
	memcg_destroy_list_lru(lru);

	kfree(lru->node);
	lru->node = NULL;

	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);