/* namei.c */
  1. /* CacheFiles path walking and related routines
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells ([email protected])
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public Licence
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the Licence, or (at your option) any later version.
  10. */
  11. #include <linux/module.h>
  12. #include <linux/sched.h>
  13. #include <linux/file.h>
  14. #include <linux/fs.h>
  15. #include <linux/fsnotify.h>
  16. #include <linux/quotaops.h>
  17. #include <linux/xattr.h>
  18. #include <linux/mount.h>
  19. #include <linux/namei.h>
  20. #include <linux/security.h>
  21. #include <linux/slab.h>
  22. #include <linux/xattr.h>
  23. #include "internal.h"
  24. #define CACHEFILES_KEYBUF_SIZE 512
  25. /*
  26. * dump debugging info about an object
  27. */
  28. static noinline
  29. void __cachefiles_printk_object(struct cachefiles_object *object,
  30. const char *prefix,
  31. u8 *keybuf)
  32. {
  33. struct fscache_cookie *cookie;
  34. unsigned keylen, loop;
  35. pr_err("%sobject: OBJ%x\n", prefix, object->fscache.debug_id);
  36. pr_err("%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
  37. prefix, object->fscache.state->name,
  38. object->fscache.flags, work_busy(&object->fscache.work),
  39. object->fscache.events, object->fscache.event_mask);
  40. pr_err("%sops=%u inp=%u exc=%u\n",
  41. prefix, object->fscache.n_ops, object->fscache.n_in_progress,
  42. object->fscache.n_exclusive);
  43. pr_err("%sparent=%p\n",
  44. prefix, object->fscache.parent);
  45. spin_lock(&object->fscache.lock);
  46. cookie = object->fscache.cookie;
  47. if (cookie) {
  48. pr_err("%scookie=%p [pr=%p nd=%p fl=%lx]\n",
  49. prefix,
  50. object->fscache.cookie,
  51. object->fscache.cookie->parent,
  52. object->fscache.cookie->netfs_data,
  53. object->fscache.cookie->flags);
  54. if (keybuf && cookie->def)
  55. keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
  56. CACHEFILES_KEYBUF_SIZE);
  57. else
  58. keylen = 0;
  59. } else {
  60. pr_err("%scookie=NULL\n", prefix);
  61. keylen = 0;
  62. }
  63. spin_unlock(&object->fscache.lock);
  64. if (keylen) {
  65. pr_err("%skey=[%u] '", prefix, keylen);
  66. for (loop = 0; loop < keylen; loop++)
  67. pr_cont("%02x", keybuf[loop]);
  68. pr_cont("'\n");
  69. }
  70. }
  71. /*
  72. * dump debugging info about a pair of objects
  73. */
  74. static noinline void cachefiles_printk_object(struct cachefiles_object *object,
  75. struct cachefiles_object *xobject)
  76. {
  77. u8 *keybuf;
  78. keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO);
  79. if (object)
  80. __cachefiles_printk_object(object, "", keybuf);
  81. if (xobject)
  82. __cachefiles_printk_object(xobject, "x", keybuf);
  83. kfree(keybuf);
  84. }
  85. /*
  86. * mark the owner of a dentry, if there is one, to indicate that that dentry
  87. * has been preemptively deleted
  88. * - the caller must hold the i_mutex on the dentry's parent as required to
  89. * call vfs_unlink(), vfs_rmdir() or vfs_rename()
  90. */
  91. static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
  92. struct dentry *dentry,
  93. enum fscache_why_object_killed why)
  94. {
  95. struct cachefiles_object *object;
  96. struct rb_node *p;
  97. _enter(",'%pd'", dentry);
  98. write_lock(&cache->active_lock);
  99. p = cache->active_nodes.rb_node;
  100. while (p) {
  101. object = rb_entry(p, struct cachefiles_object, active_node);
  102. if (object->dentry > dentry)
  103. p = p->rb_left;
  104. else if (object->dentry < dentry)
  105. p = p->rb_right;
  106. else
  107. goto found_dentry;
  108. }
  109. write_unlock(&cache->active_lock);
  110. _leave(" [no owner]");
  111. return;
  112. /* found the dentry for */
  113. found_dentry:
  114. kdebug("preemptive burial: OBJ%x [%s] %p",
  115. object->fscache.debug_id,
  116. object->fscache.state->name,
  117. dentry);
  118. if (fscache_object_is_live(&object->fscache)) {
  119. pr_err("\n");
  120. pr_err("Error: Can't preemptively bury live object\n");
  121. cachefiles_printk_object(object, NULL);
  122. } else {
  123. if (why != FSCACHE_OBJECT_IS_STALE)
  124. fscache_object_mark_killed(&object->fscache, why);
  125. }
  126. write_unlock(&cache->active_lock);
  127. _leave(" [owner marked]");
  128. }
/*
 * record the fact that an object is now active
 *
 * Sets CACHEFILES_OBJECT_ACTIVE on the object and inserts it into the
 * cache's rbtree of active objects, which is keyed by backing dentry
 * pointer.  If an object from a previous incarnation still holds the same
 * dentry slot, we wait (up to 60s) for it to go away and then retry.
 *
 * Returns 0 on success, or -ETIMEDOUT if we gave up and instead arranged
 * for this object to be requeued behind the old one.
 */
static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
					 struct cachefiles_object *object)
{
	struct cachefiles_object *xobject;
	struct rb_node **_p, *_parent = NULL;
	struct dentry *dentry;

	_enter(",%p", object);

try_again:
	write_lock(&cache->active_lock);

	/* an object must not be marked active twice */
	if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
		pr_err("Error: Object already active\n");
		cachefiles_printk_object(object, NULL);
		BUG();
	}

	/* find the insertion point in the tree, keyed by dentry pointer */
	dentry = object->dentry;
	_p = &cache->active_nodes.rb_node;
	while (*_p) {
		_parent = *_p;
		xobject = rb_entry(_parent,
				   struct cachefiles_object, active_node);

		ASSERT(xobject != object);

		if (xobject->dentry > dentry)
			_p = &(*_p)->rb_left;
		else if (xobject->dentry < dentry)
			_p = &(*_p)->rb_right;
		else
			goto wait_for_old_object;
	}

	rb_link_node(&object->active_node, _parent, _p);
	rb_insert_color(&object->active_node, &cache->active_nodes);

	write_unlock(&cache->active_lock);
	_leave(" = 0");
	return 0;

	/* an old object from a previous incarnation is hogging the slot - we
	 * need to wait for it to be destroyed */
wait_for_old_object:
	/* colliding with a still-live object indicates a bug somewhere */
	if (fscache_object_is_live(&xobject->fscache)) {
		pr_err("\n");
		pr_err("Error: Unexpected object collision\n");
		cachefiles_printk_object(object, xobject);
	}
	/* pin the old object whilst we wait on it outside the lock */
	atomic_inc(&xobject->usage);
	write_unlock(&cache->active_lock);

	if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
		wait_queue_head_t *wq;

		signed long timeout = 60 * HZ;
		wait_queue_t wait;
		bool requeue;

		/* if the object we're waiting for is queued for processing,
		 * then just put ourselves on the queue behind it */
		if (work_pending(&xobject->fscache.work)) {
			_debug("queue OBJ%x behind OBJ%x immediately",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		/* otherwise we sleep until either the object we're waiting for
		 * is done, or the fscache_object is congested */
		wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
		init_wait(&wait);
		requeue = false;
		do {
			prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
			if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
				break;

			requeue = fscache_object_sleep_till_congested(&timeout);
		} while (timeout > 0 && !requeue);
		finish_wait(wq, &wait);

		if (requeue &&
		    test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
			_debug("queue OBJ%x behind OBJ%x after wait",
			       object->fscache.debug_id,
			       xobject->fscache.debug_id);
			goto requeue;
		}

		if (timeout <= 0) {
			pr_err("\n");
			pr_err("Error: Overlong wait for old active object to go away\n");
			cachefiles_printk_object(object, xobject);
			goto requeue;
		}
	}

	ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));

	/* the old object has gone away - drop our pin and try again */
	cache->cache.ops->put_object(&xobject->fscache);
	goto try_again;

requeue:
	/* undo our own ACTIVE mark, drop the pin on the old object and tell
	 * the caller to retry later */
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
	cache->cache.ops->put_object(&xobject->fscache);
	_leave(" = -ETIMEDOUT");
	return -ETIMEDOUT;
}
/*
 * Mark an object as being inactive.
 *
 * Removes the object from the cache's tree of active objects, clears its
 * ACTIVE flag and wakes any waiter blocked in cachefiles_mark_object_active(),
 * then credits the freed space to the daemon's release counters.
 *
 * @i_blocks is the backing inode's block count, supplied by the caller
 * (see cachefiles_walk_to_object()) rather than read here.
 */
void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
				     struct cachefiles_object *object,
				     blkcnt_t i_blocks)
{
	write_lock(&cache->active_lock);
	rb_erase(&object->active_node, &cache->active_nodes);
	clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
	write_unlock(&cache->active_lock);

	/* wake anyone waiting for this dentry slot to become free */
	wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);

	/* This object can now be culled, so we need to let the daemon know
	 * that there is something it can remove if it needs to.
	 */
	atomic_long_add(i_blocks, &cache->b_released);
	if (atomic_inc_return(&cache->f_released))
		cachefiles_state_changed(cache);
}
/*
 * delete an object representation from the cache
 * - file backed objects are unlinked
 * - directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 * - unlocks the directory mutex (the caller must hold dir's i_mutex on
 *   entry; it is always released before return)
 *
 * @preemptive: also mark the owning object, if any, as buried (@why says
 * for what reason).
 */
static int cachefiles_bury_object(struct cachefiles_cache *cache,
				  struct dentry *dir,
				  struct dentry *rep,
				  bool preemptive,
				  enum fscache_why_object_killed why)
{
	struct dentry *grave, *trap;
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];
	int ret;

	_enter(",'%pd','%pd'", dir, rep);

	_debug("remove %p from %p", rep, dir);

	/* non-directories can just be unlinked */
	if (!d_is_dir(rep)) {
		_debug("unlink stale object");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_unlink(&path, rep);
		if (ret < 0) {
			cachefiles_io_error(cache, "Unlink security error");
		} else {
			ret = vfs_unlink(d_inode(dir), rep, NULL);

			if (preemptive)
				cachefiles_mark_object_buried(cache, rep, why);
		}

		inode_unlock(d_inode(dir));

		if (ret == -EIO)
			cachefiles_io_error(cache, "Unlink failed");

		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	inode_unlock(d_inode(dir));

try_again:
	/* first step is to make up a grave dentry in the graveyard;
	 * the name is a timestamp plus a monotonic counter to keep it
	 * unique */
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) get_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	/* do the multiway lock magic */
	trap = lock_rename(cache->graveyard, dir);

	/* do some checks before getting the grave dentry */
	if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
		/* the entry was probably culled when we dropped the parent dir
		 * lock */
		unlock_rename(cache->graveyard, dir);
		_leave(" = 0 [culled?]");
		return 0;
	}

	if (!d_can_lookup(cache->graveyard)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Graveyard no longer a directory");
		return -EIO;
	}

	if (trap == rep) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	if (d_mountpoint(rep)) {
		unlock_rename(cache->graveyard, dir);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
	if (IS_ERR(grave)) {
		unlock_rename(cache->graveyard, dir);

		if (PTR_ERR(grave) == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %ld",
				    PTR_ERR(grave));
		return -EIO;
	}

	/* grave name already taken - pick another one and retry */
	if (d_is_positive(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* target should not be an ancestor of source */
	if (trap == grave) {
		unlock_rename(cache->graveyard, dir);
		dput(grave);
		cachefiles_io_error(cache, "May not make directory loop");
		return -EIO;
	}

	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		ret = vfs_rename(d_inode(dir), rep,
				 d_inode(cache->graveyard), grave, NULL, 0);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);

		if (preemptive)
			cachefiles_mark_object_buried(cache, rep, why);
	}

	unlock_rename(cache->graveyard, dir);
	dput(grave);
	_leave(" = 0");
	return 0;
}
/*
 * delete an object representation from the cache
 *
 * Takes the parent directory's i_mutex, rechecks that the object wasn't
 * already preemptively buried or moved out of the key path, and buries it
 * otherwise.  Returns 0 or a negative error from cachefiles_bury_object().
 */
int cachefiles_delete_object(struct cachefiles_cache *cache,
			     struct cachefiles_object *object)
{
	struct dentry *dir;
	int ret;

	_enter(",OBJ%x{%p}", object->fscache.debug_id, object->dentry);

	ASSERT(object->dentry);
	ASSERT(d_backing_inode(object->dentry));
	ASSERT(object->dentry->d_parent);

	dir = dget_parent(object->dentry);

	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	if (test_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->fscache.flags)) {
		/* object allocation for the same key preemptively deleted this
		 * object's file so that it could create its own file */
		_debug("object preemptively buried");
		inode_unlock(d_inode(dir));
		ret = 0;
	} else {
		/* we need to check that our parent is _still_ our parent - it
		 * may have been renamed */
		if (dir == object->dentry->d_parent) {
			/* cachefiles_bury_object() drops the dir lock */
			ret = cachefiles_bury_object(cache, dir,
						     object->dentry, false,
						     FSCACHE_OBJECT_WAS_RETIRED);
		} else {
			/* it got moved, presumably by cachefilesd culling it,
			 * so it's no longer in the key path and we can ignore
			 * it */
			inode_unlock(d_inode(dir));
			ret = 0;
		}
	}

	dput(dir);
	_leave(" = %d", ret);
	return ret;
}
/*
 * walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go
 *
 * @key is a sequence of NUL-separated path elements terminated by a double
 * NUL.  Intermediate elements and index objects become subdirectories; a
 * terminal non-index object becomes a regular file.
 *
 * On success, the terminal dentry is stored in object->dentry (and, for a
 * data object, object->backer), the object is marked active and 0 is
 * returned.  On failure a negative errno is returned.
 */
int cachefiles_walk_to_object(struct cachefiles_object *parent,
			      struct cachefiles_object *object,
			      const char *key,
			      struct cachefiles_xattr *auxdata)
{
	struct cachefiles_cache *cache;
	struct dentry *dir, *next = NULL;
	struct path path;
	unsigned long start;
	const char *name;
	int ret, nlen;

	_enter("OBJ%x{%p},OBJ%x,%s,",
	       parent->fscache.debug_id, parent->dentry,
	       object->fscache.debug_id, key);

	cache = container_of(parent->fscache.cache,
			     struct cachefiles_cache, cache);
	path.mnt = cache->mnt;

	ASSERT(parent->dentry);
	ASSERT(d_backing_inode(parent->dentry));

	if (!(d_is_dir(parent->dentry))) {
		// TODO: convert file to dir
		_leave("looking up in none directory");
		return -ENOBUFS;
	}

	dir = dget(parent->dentry);

advance:
	/* attempt to transit the first directory component */
	name = key;
	nlen = strlen(key);

	/* key ends in a double NUL */
	key = key + nlen + 1;
	if (!*key)
		key = NULL;

lookup_again:
	/* search the current directory for the element name */
	_debug("lookup '%s'", name);

	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	start = jiffies;
	next = lookup_one_len(name, dir, nlen);
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(next))
		goto lookup_error;

	_debug("next -> %p %s", next, d_backing_inode(next) ? "positive" : "negative");

	/* at the terminal element, note whether we will have to create it */
	if (!key)
		object->new = !d_backing_inode(next);

	/* if this element of the path doesn't exist, then the lookup phase
	 * failed, and we can release any readers in the certain knowledge that
	 * there's nothing for them to actually read */
	if (d_is_negative(next))
		fscache_object_lookup_negative(&object->fscache);

	/* we need to create the object if it's negative */
	if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) {
		/* index objects and intervening tree levels must be subdirs */
		if (d_is_negative(next)) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto no_space_error;

			path.dentry = dir;
			ret = security_path_mkdir(&path, next, 0);
			if (ret < 0)
				goto create_error;
			start = jiffies;
			ret = vfs_mkdir(d_inode(dir), next, 0);
			cachefiles_hist(cachefiles_mkdir_histogram, start);
			if (ret < 0)
				goto create_error;

			ASSERT(d_backing_inode(next));

			_debug("mkdir -> %p{%p{ino=%lu}}",
			       next, d_backing_inode(next), d_backing_inode(next)->i_ino);

		} else if (!d_can_lookup(next)) {
			pr_err("inode %lu is not a directory\n",
			       d_backing_inode(next)->i_ino);
			ret = -ENOBUFS;
			goto error;
		}

	} else {
		/* non-index objects start out life as files */
		if (d_is_negative(next)) {
			ret = cachefiles_has_space(cache, 1, 0);
			if (ret < 0)
				goto no_space_error;

			path.dentry = dir;
			ret = security_path_mknod(&path, next, S_IFREG, 0);
			if (ret < 0)
				goto create_error;
			start = jiffies;
			ret = vfs_create(d_inode(dir), next, S_IFREG, true);
			cachefiles_hist(cachefiles_create_histogram, start);
			if (ret < 0)
				goto create_error;

			ASSERT(d_backing_inode(next));

			_debug("create -> %p{%p{ino=%lu}}",
			       next, d_backing_inode(next), d_backing_inode(next)->i_ino);

		} else if (!d_can_lookup(next) &&
			   !d_is_reg(next)
			   ) {
			pr_err("inode %lu is not a file or directory\n",
			       d_backing_inode(next)->i_ino);
			ret = -ENOBUFS;
			goto error;
		}
	}

	/* process the next component */
	if (key) {
		_debug("advance");
		inode_unlock(d_inode(dir));
		dput(dir);
		dir = next;
		next = NULL;
		goto advance;
	}

	/* we've found the object we were looking for */
	object->dentry = next;

	/* if we've found that the terminal object exists, then we need to
	 * check its attributes and delete it if it's out of date */
	if (!object->new) {
		_debug("validate '%pd'", next);

		ret = cachefiles_check_object_xattr(object, auxdata);
		if (ret == -ESTALE) {
			/* delete the object (the deleter drops the directory
			 * mutex) */
			object->dentry = NULL;

			ret = cachefiles_bury_object(cache, dir, next, true,
						     FSCACHE_OBJECT_IS_STALE);
			dput(next);
			next = NULL;

			if (ret < 0)
				goto delete_error;

			/* the dir lock was dropped by the burial, so redo the
			 * lookup from scratch (which retakes it) */
			_debug("redo lookup");
			fscache_object_retrying_stale(&object->fscache);
			goto lookup_again;
		}
	}

	/* note that we're now using this object */
	ret = cachefiles_mark_object_active(cache, object);

	inode_unlock(d_inode(dir));
	dput(dir);
	dir = NULL;

	if (ret == -ETIMEDOUT)
		goto mark_active_timed_out;

	_debug("=== OBTAINED_OBJECT ===");

	if (object->new) {
		/* attach data to a newly constructed terminal object */
		ret = cachefiles_set_object_xattr(object, auxdata);
		if (ret < 0)
			goto check_error;
	} else {
		/* always update the atime on an object we've just looked up
		 * (this is used to keep track of culling, and atimes are only
		 * updated by read, write and readdir but not lookup or
		 * open) */
		path.dentry = next;
		touch_atime(&path);
	}

	/* open a file interface onto a data file */
	if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
		if (d_is_reg(object->dentry)) {
			const struct address_space_operations *aops;

			/* the backing file must support bmap and its block
			 * size must not exceed the page size */
			ret = -EPERM;
			aops = d_backing_inode(object->dentry)->i_mapping->a_ops;
			if (!aops->bmap)
				goto check_error;
			if (object->dentry->d_sb->s_blocksize > PAGE_SIZE)
				goto check_error;

			object->backer = object->dentry;
		} else {
			BUG(); // TODO: open file in data-class subdir
		}
	}

	object->new = 0;
	fscache_obtained_object(&object->fscache);

	_leave(" = 0 [%lu]", d_backing_inode(object->dentry)->i_ino);
	return 0;

no_space_error:
	fscache_object_mark_killed(&object->fscache, FSCACHE_OBJECT_NO_SPACE);
create_error:
	_debug("create error %d", ret);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Create/mkdir failed");
	goto error;

mark_active_timed_out:
	_debug("mark active timed out");
	goto release_dentry;

check_error:
	_debug("check error %d", ret);
	cachefiles_mark_object_inactive(
		cache, object, d_backing_inode(object->dentry)->i_blocks);
release_dentry:
	dput(object->dentry);
	object->dentry = NULL;
	goto error_out;

delete_error:
	_debug("delete error %d", ret);
	goto error_out2;

lookup_error:
	_debug("lookup error %ld", PTR_ERR(next));
	ret = PTR_ERR(next);
	if (ret == -EIO)
		cachefiles_io_error(cache, "Lookup failed");
	next = NULL;
error:
	inode_unlock(d_inode(dir));
	dput(next);
error_out2:
	dput(dir);
error_out:
	_leave(" = error %d", -ret);
	return ret;
}
/*
 * get a subdirectory
 *
 * Looks up @dirname in @dir, creating it with mode 0700 if it doesn't
 * exist, and checks that the result is a directory whose backing
 * filesystem supports the operations cachefiles needs (xattrs, lookup,
 * mkdir, create, rename, rmdir, unlink).
 *
 * Returns the subdirectory's dentry with a reference held, or an ERR_PTR.
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname)
{
	struct dentry *subdir;
	unsigned long start;
	struct path path;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */
	inode_lock(d_inode(dir));

	start = jiffies;
	subdir = lookup_one_len(dirname, dir, strlen(dirname));
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(subdir)) {
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %p %s",
	       subdir, d_backing_inode(subdir) ? "positive" : "negative");

	/* we need to create the subdir if it doesn't exist yet */
	if (d_is_negative(subdir)) {
		ret = cachefiles_has_space(cache, 1, 0);
		if (ret < 0)
			goto mkdir_error;

		_debug("attempt mkdir");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_mkdir(&path, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;
		ret = vfs_mkdir(d_inode(dir), subdir, 0700);
		if (ret < 0)
			goto mkdir_error;

		ASSERT(d_backing_inode(subdir));

		_debug("mkdir -> %p{%p{ino=%lu}}",
		       subdir,
		       d_backing_inode(subdir),
		       d_backing_inode(subdir)->i_ino);
	}

	inode_unlock(d_inode(dir));

	/* we need to make sure the subdir is a directory */
	ASSERT(d_backing_inode(subdir));

	if (!d_can_lookup(subdir)) {
		pr_err("%s is not a directory\n", dirname);
		ret = -EIO;
		goto check_error;
	}

	/* the backing fs must provide every operation we rely on */
	ret = -EPERM;
	if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
	    !d_backing_inode(subdir)->i_op->lookup ||
	    !d_backing_inode(subdir)->i_op->mkdir ||
	    !d_backing_inode(subdir)->i_op->create ||
	    !d_backing_inode(subdir)->i_op->rename ||
	    !d_backing_inode(subdir)->i_op->rmdir ||
	    !d_backing_inode(subdir)->i_op->unlink)
		goto check_error;

	_leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
	return subdir;

check_error:
	dput(subdir);
	_leave(" = %d [check]", ret);
	return ERR_PTR(ret);

mkdir_error:
	inode_unlock(d_inode(dir));
	dput(subdir);
	pr_err("mkdir %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(subdir);
	pr_err("Lookup %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

nomem_d_alloc:
	inode_unlock(d_inode(dir));
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}
/*
 * find out if an object is in use or not
 * - if finds object and it's not in use:
 *   - returns a pointer to the object and a reference on it
 *   - returns with the directory locked
 *
 * On success the parent dir's i_mutex is STILL HELD; the caller must
 * release it.  Error returns (lock dropped):
 *   -ENOENT  the victim dentry is negative
 *   -ESTALE  the lookup failed with -ENOENT (probably retired by netfs)
 *   -EBUSY   the victim belongs to an object currently in use
 *   -EIO / -ENOMEM  other lookup failures
 */
static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
					      struct dentry *dir,
					      char *filename)
{
	struct cachefiles_object *object;
	struct rb_node *_n;
	struct dentry *victim;
	unsigned long start;
	int ret;

	//_enter(",%pd/,%s",
	//       dir, filename);

	/* look up the victim */
	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);

	start = jiffies;
	victim = lookup_one_len(filename, dir, strlen(filename));
	cachefiles_hist(cachefiles_lookup_histogram, start);
	if (IS_ERR(victim))
		goto lookup_error;

	//_debug("victim -> %p %s",
	//       victim, d_backing_inode(victim) ? "positive" : "negative");

	/* if the object is no longer there then we probably retired the object
	 * at the netfs's request whilst the cull was in progress
	 */
	if (d_is_negative(victim)) {
		inode_unlock(d_inode(dir));
		dput(victim);
		_leave(" = -ENOENT [absent]");
		return ERR_PTR(-ENOENT);
	}

	/* check to see if we're using this object; the active tree is keyed
	 * by backing dentry pointer */
	read_lock(&cache->active_lock);

	_n = cache->active_nodes.rb_node;

	while (_n) {
		object = rb_entry(_n, struct cachefiles_object, active_node);

		if (object->dentry > victim)
			_n = _n->rb_left;
		else if (object->dentry < victim)
			_n = _n->rb_right;
		else
			goto object_in_use;
	}

	read_unlock(&cache->active_lock);

	//_leave(" = %p", victim);
	return victim;

object_in_use:
	read_unlock(&cache->active_lock);
	inode_unlock(d_inode(dir));
	dput(victim);
	//_leave(" = -EBUSY [in use]");
	return ERR_PTR(-EBUSY);

lookup_error:
	inode_unlock(d_inode(dir));
	ret = PTR_ERR(victim);
	if (ret == -ENOENT) {
		/* file or dir now absent - probably retired by netfs */
		_leave(" = -ESTALE [absent]");
		return ERR_PTR(-ESTALE);
	}

	if (ret == -EIO) {
		cachefiles_io_error(cache, "Lookup failed");
	} else if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
/*
 * cull an object if it's not in use
 * - called only by cache manager daemon
 *
 * Strips the victim's cache xattr to mark it stale, then removes it
 * (files are unlinked, directories moved to the graveyard).  Returns 0,
 * -ESTALE if the victim disappeared underneath us, -EBUSY if it's in use,
 * or another negative error.
 */
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
		    char *filename)
{
	struct dentry *victim;
	int ret;

	_enter(",%pd/,%s", dir, filename);

	/* on success this returns with dir locked and a ref on victim */
	victim = cachefiles_check_active(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	_debug("victim -> %p %s",
	       victim, d_backing_inode(victim) ? "positive" : "negative");

	/* okay... the victim is not being used so we can cull it
	 * - start by marking it as stale
	 */
	_debug("victim is cullable");

	ret = cachefiles_remove_object_xattr(cache, victim);
	if (ret < 0)
		goto error_unlock;

	/* actually remove the victim (drops the dir mutex) */
	_debug("bury");

	ret = cachefiles_bury_object(cache, dir, victim, false,
				     FSCACHE_OBJECT_WAS_CULLED);
	if (ret < 0)
		goto error;

	dput(victim);
	_leave(" = 0");
	return 0;

error_unlock:
	inode_unlock(d_inode(dir));
error:
	dput(victim);
	if (ret == -ENOENT) {
		/* file or dir now absent - probably retired by netfs */
		_leave(" = -ESTALE [absent]");
		return -ESTALE;
	}

	if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ret;
}
  820. /*
  821. * find out if an object is in use or not
  822. * - called only by cache manager daemon
  823. * - returns -EBUSY or 0 to indicate whether an object is in use or not
  824. */
  825. int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
  826. char *filename)
  827. {
  828. struct dentry *victim;
  829. //_enter(",%pd/,%s",
  830. // dir, filename);
  831. victim = cachefiles_check_active(cache, dir, filename);
  832. if (IS_ERR(victim))
  833. return PTR_ERR(victim);
  834. inode_unlock(d_inode(dir));
  835. dput(victim);
  836. //_leave(" = 0");
  837. return 0;
  838. }