addr.c

#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>	/* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/osd_client.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty. (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and are writing the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */
#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)				\
	(CONGESTION_ON_THRESH(congestion_kb) -				\
	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
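
/*
 * Return the snap context a dirty page was dirtied under (stashed in
 * page->private by ceph_set_page_dirty()), or NULL if the page
 * carries no private data.
 */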
static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
	if (PagePrivate(page))
		return (void *)page->private;
	return NULL;
}

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static int ceph_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc;
	int ret;

	if (unlikely(!mapping))
		return !TestSetPageDirty(page);

	if (PageDirty(page)) {
		dout("%p set_page_dirty %p idx %lu -- already dirty\n",
		     mapping->host, page, page->index);
		BUG_ON(!PagePrivate(page));
		return 0;
	}

	inode = mapping->host;
	ci = ceph_inode(inode);

	/* dirty the head */
	spin_lock(&ci->i_ceph_lock);
	BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
				list_last_entry(&ci->i_cap_snaps,
						struct ceph_cap_snap,
						ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
		capsnap->dirty_pages++;
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		++ci->i_wrbuffer_ref_head;
	}
	if (ci->i_wrbuffer_ref == 0)
		ihold(inode);
	++ci->i_wrbuffer_ref;
	dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
	     "snapc %p seq %lld (%d snaps)\n",
	     mapping->host, page, page->index,
	     ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
	     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
	     snapc, snapc->seq, snapc->num_snaps);
	spin_unlock(&ci->i_ceph_lock);

	/*
	 * Reference snap context in page->private.  Also set
	 * PagePrivate so that we get invalidatepage callback.
	 */
	BUG_ON(PagePrivate(page));
	page->private = (unsigned long)snapc;
	SetPagePrivate(page);

	ret = __set_page_dirty_nobuffers(page);
	WARN_ON(!PageLocked(page));
	WARN_ON(!page->mapping);

	return ret;
}

/*
 * If we are truncating the full page (i.e. offset == 0), adjust the
 * dirty page counters appropriately.  Only called if there is private
 * data on the page.
 */
static void ceph_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_snap_context *snapc = page_snap_context(page);

	inode = page->mapping->host;
	ci = ceph_inode(inode);

	if (offset != 0 || length != PAGE_SIZE) {
		dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n",
		     inode, page, page->index, offset, length);
		return;
	}

	ceph_invalidate_fscache_page(inode, page);

	if (!PagePrivate(page))
		return;

	/*
	 * We can get non-dirty pages here due to races between
	 * set_page_dirty and truncate_complete_page; just spit out a
	 * warning, in case we end up with accounting problems later.
	 */
	if (!PageDirty(page))
		pr_err("%p invalidatepage %p page not dirty\n", inode, page);

	ClearPageChecked(page);

	dout("%p invalidatepage %p idx %lu full dirty page\n",
	     inode, page, page->index);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);
	page->private = 0;
	ClearPagePrivate(page);
}

static int ceph_releasepage(struct page *page, gfp_t g)
{
	dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host,
	     page, page->index, PageDirty(page) ? "" : "not ");

	/* Can we release the page from the cache? */
	if (!ceph_release_fscache_page(page, g))
		return 0;

	return !PagePrivate(page);
}

/*
 * read a single page, without unlocking it.
 */
static int ceph_do_readpage(struct file *filp, struct page *page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	int err = 0;
	u64 off = page_offset(page);
	u64 len = PAGE_SIZE;

	if (off >= i_size_read(inode)) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		/*
		 * Uptodate inline data should have been added
		 * into page cache while getting Fcr caps.
		 */
		if (off == 0)
			return -EINVAL;
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	err = ceph_readpage_from_fscache(inode, page);
	if (err == 0)
		return -EINPROGRESS;

	dout("readpage inode %p file %p page %p index %lu\n",
	     inode, filp, page, page->index);
	err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
				  off, &len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  &page, 1, 0);
	if (err == -ENOENT)
		err = 0;
	if (err < 0) {
		SetPageError(page);
		ceph_fscache_readpage_cancel(inode, page);
		goto out;
	}
	if (err < PAGE_SIZE)
		/* zero fill remainder of page */
		zero_user_segment(page, err, PAGE_SIZE);
	else
		flush_dcache_page(page);

	SetPageUptodate(page);
	ceph_readpage_to_fscache(inode, page);

out:
	return err < 0 ? err : 0;
}
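
/*
 * Read one page and unlock it, unless the read was punted to fscache
 * (-EINPROGRESS), in which case the completion callback unlocks it.
 */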
static int ceph_readpage(struct file *filp, struct page *page)
{
	int r = ceph_do_readpage(filp, page);
	if (r != -EINPROGRESS)
		unlock_page(page);
	else
		r = 0;
	return r;
}

/*
 * Finish an async read(ahead) op.
 */
static void finish_read(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_osd_data *osd_data;
	int rc = req->r_result <= 0 ? req->r_result : 0;
	int bytes = req->r_result >= 0 ? req->r_result : 0;
	int num_pages;
	int i;

	dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);

	/* unlock all pages, zeroing any data we didn't read */
	osd_data = osd_req_op_extent_osd_data(req, 0);
	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
	num_pages = calc_pages_for((u64)osd_data->alignment,
				   (u64)osd_data->length);
	for (i = 0; i < num_pages; i++) {
		struct page *page = osd_data->pages[i];

		if (rc < 0 && rc != -ENOENT) {
			ceph_fscache_readpage_cancel(inode, page);
			goto unlock;
		}
		if (bytes < (int)PAGE_SIZE) {
			/* zero (remainder of) page */
			int s = bytes < 0 ? 0 : bytes;
			zero_user_segment(page, s, PAGE_SIZE);
		}
		dout("finish_read %p uptodate %p idx %lu\n", inode, page,
		     page->index);
		flush_dcache_page(page);
		SetPageUptodate(page);
		ceph_readpage_to_fscache(inode, page);
unlock:
		unlock_page(page);
		put_page(page);
		bytes -= PAGE_SIZE;
	}
	kfree(osd_data->pages);
}

/*
 * start an async read(ahead) operation.  return nr_pages we submitted
 * a read for on success, or negative error code.
 */
static int start_read(struct inode *inode, struct list_head *page_list, int max)
{
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *page = list_entry(page_list->prev, struct page, lru);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	u64 off;
	u64 len;
	int i;
	struct page **pages;
	pgoff_t next_index;
	int nr_pages = 0;
	int got = 0;
	int ret = 0;
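
	/*
	 * current->journal_info doubles as a "caps already held" flag:
	 * callers that hold Fcr references set it (see the fault path
	 * below), so cap refs are only taken here for the readahead/
	 * fadvise/madvise cases that arrive without them.
	 */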
	if (!current->journal_info) {
		/* caller of readpages does not hold buffer and read caps
		 * (fadvise, madvise and readahead cases) */
		int want = CEPH_CAP_FILE_CACHE;
		ret = ceph_try_get_caps(ci, CEPH_CAP_FILE_RD, want, &got);
		if (ret < 0) {
			dout("start_read %p, error getting cap\n", inode);
		} else if (!(got & want)) {
			dout("start_read %p, no cache cap\n", inode);
			ret = 0;
		}
		if (ret <= 0) {
			if (got)
				ceph_put_cap_refs(ci, got);
			while (!list_empty(page_list)) {
				page = list_entry(page_list->prev,
						  struct page, lru);
				list_del(&page->lru);
				put_page(page);
			}
			return ret;
		}
	}

	off = (u64) page_offset(page);

	/* count pages */
	next_index = page->index;
	list_for_each_entry_reverse(page, page_list, lru) {
		if (page->index != next_index)
			break;
		nr_pages++;
		next_index++;
		if (max && nr_pages == max)
			break;
	}
	len = nr_pages << PAGE_SHIFT;
	dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
	     off, len);
	vino = ceph_vino(inode);
	req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
				    0, 1, CEPH_OSD_OP_READ,
				    CEPH_OSD_FLAG_READ, NULL,
				    ci->i_truncate_seq, ci->i_truncate_size,
				    false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	/* build page vector */
	nr_pages = calc_pages_for(0, len);
	pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out_put;
	}
	for (i = 0; i < nr_pages; ++i) {
		page = list_entry(page_list->prev, struct page, lru);
		BUG_ON(PageLocked(page));
		list_del(&page->lru);

		dout("start_read %p adding %p idx %lu\n", inode, page,
		     page->index);
		if (add_to_page_cache_lru(page, &inode->i_data, page->index,
					  GFP_KERNEL)) {
			ceph_fscache_uncache_page(inode, page);
			put_page(page);
			dout("start_read %p add_to_page_cache failed %p\n",
			     inode, page);
			nr_pages = i;
			if (nr_pages > 0) {
				len = nr_pages << PAGE_SHIFT;
				osd_req_op_extent_update(req, 0, len);
				break;
			}
			goto out_pages;
		}
		pages[i] = page;
	}
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
	req->r_callback = finish_read;
	req->r_inode = inode;

	dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
	ret = ceph_osdc_start_request(osdc, req, false);
	if (ret < 0)
		goto out_pages;
	ceph_osdc_put_request(req);

	/* After adding locked pages to page cache, the inode holds cache cap.
	 * So we can drop our cap refs. */
	if (got)
		ceph_put_cap_refs(ci, got);

	return nr_pages;

out_pages:
	for (i = 0; i < nr_pages; ++i) {
		ceph_fscache_readpage_cancel(inode, pages[i]);
		unlock_page(pages[i]);
	}
	ceph_put_page_vector(pages, nr_pages, false);
out_put:
	ceph_osdc_put_request(req);
out:
	if (got)
		ceph_put_cap_refs(ci, got);
	return ret;
}

/*
 * Read multiple pages.  Leave pages we don't read + unlock in page_list;
 * the caller (VM) cleans them up.
 */
static int ceph_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned nr_pages)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	int rc = 0;
	int max = 0;

	if (ceph_inode(inode)->i_inline_version != CEPH_INLINE_NONE)
		return -EINVAL;

	rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list,
					 &nr_pages);
	if (rc == 0)
		goto out;

	if (fsc->mount_options->rsize >= PAGE_SIZE)
		max = (fsc->mount_options->rsize + PAGE_SIZE - 1)
			>> PAGE_SHIFT;

	dout("readpages %p file %p nr_pages %d max %d\n", inode,
	     file, nr_pages, max);
	while (!list_empty(page_list)) {
		rc = start_read(inode, page_list, max);
		if (rc < 0)
			goto out;
	}
out:
	ceph_fscache_readpages_cancel(inode, page_list);

	dout("readpages %p file %p ret %d\n", inode, file, rc);
	return rc;
}

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *get_oldest_context(struct inode *inode,
						    loff_t *snap_size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc = NULL;
	struct ceph_cap_snap *capsnap = NULL;

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
		     capsnap->context, capsnap->dirty_pages);
		if (capsnap->dirty_pages) {
			snapc = ceph_get_snap_context(capsnap->context);
			if (snap_size)
				*snap_size = capsnap->size;
			break;
		}
	}
	if (!snapc && ci->i_wrbuffer_ref_head) {
		snapc = ceph_get_snap_context(ci->i_head_snapc);
		dout(" head snapc %p has %d dirty pages\n",
		     snapc, ci->i_wrbuffer_ref_head);
	}
	spin_unlock(&ci->i_ceph_lock);
	return snapc;
}

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, set the page error bit, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_fs_client *fsc;
	struct ceph_osd_client *osdc;
	struct ceph_snap_context *snapc, *oldest;
	loff_t page_off = page_offset(page);
	loff_t snap_size = -1;
	long writeback_stat;
	u64 truncate_size;
	u32 truncate_seq;
	int err = 0, len = PAGE_SIZE;

	dout("writepage %p idx %lu\n", page, page->index);

	if (!page->mapping || !page->mapping->host) {
		dout("writepage %p - no mapping\n", page);
		return -EFAULT;
	}
	inode = page->mapping->host;
	ci = ceph_inode(inode);
	fsc = ceph_inode_to_client(inode);
	osdc = &fsc->client->osdc;

	/* verify this is a writeable snap context */
	snapc = page_snap_context(page);
	if (snapc == NULL) {
		dout("writepage %p page %p not dirty?\n", inode, page);
		goto out;
	}
	oldest = get_oldest_context(inode, &snap_size);
	if (snapc->seq > oldest->seq) {
		dout("writepage %p page %p snapc %p not writeable - noop\n",
		     inode, page, snapc);
		/* we should only noop if called by kswapd */
		WARN_ON((current->flags & PF_MEMALLOC) == 0);
		ceph_put_snap_context(oldest);
		goto out;
	}
	ceph_put_snap_context(oldest);

	spin_lock(&ci->i_ceph_lock);
	truncate_seq = ci->i_truncate_seq;
	truncate_size = ci->i_truncate_size;
	if (snap_size == -1)
		snap_size = i_size_read(inode);
	spin_unlock(&ci->i_ceph_lock);

	/* is this a partial page at end of file? */
	if (page_off >= snap_size) {
		dout("%p page eof %llu\n", page, snap_size);
		goto out;
	}
	if (snap_size < page_off + len)
		len = snap_size - page_off;

	dout("writepage %p page %p index %lu on %llu~%u snapc %p\n",
	     inode, page, page->index, page_off, len, snapc);

	writeback_stat = atomic_long_inc_return(&fsc->writeback_count);
	if (writeback_stat >
	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
		set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);

	set_page_writeback(page);
	err = ceph_osdc_writepages(osdc, ceph_vino(inode),
				   &ci->i_layout, snapc,
				   page_off, len,
				   truncate_seq, truncate_size,
				   &inode->i_mtime, &page, 1);
	if (err < 0) {
		struct writeback_control tmp_wbc;
		if (!wbc)
			wbc = &tmp_wbc;
		if (err == -ERESTARTSYS) {
			/* killed by SIGKILL */
			dout("writepage interrupted page %p\n", page);
			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			goto out;
		}
		dout("writepage setting page/mapping error %d %p\n",
		     err, page);
		SetPageError(page);
		mapping_set_error(&inode->i_data, err);
		wbc->pages_skipped++;
	} else {
		dout("writepage cleaned page %p\n", page);
		err = 0;  /* vfs expects us to return 0 */
	}
	page->private = 0;
	ClearPagePrivate(page);
	end_page_writeback(page);
	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
	ceph_put_snap_context(snapc);	/* page's reference */
out:
	return err;
}
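
/*
 * Writepage entry point: pin the inode across the write so it cannot
 * be evicted mid-writeback, then write and unlock the page.
 */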
static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
	int err;
	struct inode *inode = page->mapping->host;
	BUG_ON(!inode);
	ihold(inode);
	err = writepage_nounlock(page, wbc);
	if (err == -ERESTARTSYS) {
		/* direct memory reclaimer was killed by SIGKILL. return 0
		 * to prevent caller from setting mapping/page error */
		err = 0;
	}
	unlock_page(page);
	iput(inode);
	return err;
}

/*
 * lame release_pages helper.  release_pages() isn't exported to
 * modules.
 */
static void ceph_release_pages(struct page **pages, int num)
{
	struct pagevec pvec;
	int i;

	pagevec_init(&pvec, 0);
	for (i = 0; i < num; i++) {
		if (pagevec_add(&pvec, pages[i]) == 0)
			pagevec_release(&pvec);
	}
	pagevec_release(&pvec);
}

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req)
{
	struct inode *inode = req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_data *osd_data;
	struct page *page;
	int num_pages, total_pages = 0;
	int i, j;
	int rc = req->r_result;
	struct ceph_snap_context *snapc = req->r_snapc;
	struct address_space *mapping = inode->i_mapping;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	bool remove_page;

	dout("writepages_finish %p rc %d\n", inode, rc);
	if (rc < 0)
		mapping_set_error(mapping, rc);

	/*
	 * We lost the cache cap, need to truncate the page before
	 * it is unlocked, otherwise we'd truncate it later in the
	 * page truncation thread, possibly losing some data that
	 * raced its way in
	 */
	remove_page = !(ceph_caps_issued(ci) &
			(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));

	/* clean all pages */
	for (i = 0; i < req->r_num_ops; i++) {
		if (req->r_ops[i].op != CEPH_OSD_OP_WRITE)
			break;

		osd_data = osd_req_op_extent_osd_data(req, i);
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
		num_pages = calc_pages_for((u64)osd_data->alignment,
					   (u64)osd_data->length);
		total_pages += num_pages;
		for (j = 0; j < num_pages; j++) {
			page = osd_data->pages[j];
			BUG_ON(!page);
			WARN_ON(!PageUptodate(page));

			if (atomic_long_dec_return(&fsc->writeback_count) <
			     CONGESTION_OFF_THRESH(
					fsc->mount_options->congestion_kb))
				clear_bdi_congested(&fsc->backing_dev_info,
						    BLK_RW_ASYNC);

			if (rc < 0)
				SetPageError(page);

			ceph_put_snap_context(page_snap_context(page));
			page->private = 0;
			ClearPagePrivate(page);
			dout("unlocking %p\n", page);
			end_page_writeback(page);

			if (remove_page)
				generic_error_remove_page(inode->i_mapping,
							  page);

			unlock_page(page);
		}
		dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
		     inode, osd_data->length, rc >= 0 ? num_pages : 0);

		ceph_release_pages(osd_data->pages, num_pages);
	}

	ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

	osd_data = osd_req_op_extent_osd_data(req, 0);
	if (osd_data->pages_from_pool)
		mempool_free(osd_data->pages,
			     ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
	else
		kfree(osd_data->pages);
	ceph_osdc_put_request(req);
}

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino = ceph_vino(inode);
	pgoff_t index, start, end;
	int range_whole = 0;
	int should_loop = 1;
	pgoff_t max_pages = 0, max_pages_ever = 0;
	struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
	struct pagevec pvec;
	int done = 0;
	int rc = 0;
	unsigned int wsize = i_blocksize(inode);
	struct ceph_osd_request *req = NULL;
	int do_sync = 0;
	loff_t snap_size, i_size;
	u64 truncate_size;
	u32 truncate_seq;

	/*
	 * Include a 'sync' in the OSD request if this is a data
	 * integrity write (e.g., O_SYNC write or fsync()), or if our
	 * cap is being revoked.
	 */
	if ((wbc->sync_mode == WB_SYNC_ALL) ||
	    ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER))
		do_sync = 1;
	dout("writepages_start %p dosync=%d (mode=%s)\n",
	     inode, do_sync,
	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

	if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		if (ci->i_wrbuffer_ref > 0) {
			pr_warn_ratelimited(
				"writepage_start %p %lld forced umount\n",
				inode, ceph_ino(inode));
		}
		mapping_set_error(mapping, -EIO);
		return -EIO; /* we're in a forced umount, don't write! */
	}
	if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
		wsize = fsc->mount_options->wsize;
	if (wsize < PAGE_SIZE)
		wsize = PAGE_SIZE;
	max_pages_ever = wsize >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);

	/* where to start/end? */
	if (wbc->range_cyclic) {
		start = mapping->writeback_index; /* Start from prev offset */
		end = -1;
		dout(" cyclic, start at %lu\n", start);
	} else {
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		should_loop = 0;
		dout(" not cyclic, %lu to %lu\n", start, end);
	}
	index = start;

retry:
	/* find oldest snap context with dirty data */
	ceph_put_snap_context(snapc);
	snap_size = -1;
	snapc = get_oldest_context(inode, &snap_size);
	if (!snapc) {
		/* hmm, why does writepages get called when there
		   is no dirty data? */
		dout(" no snap context with dirty data?\n");
		goto out;
	}
	dout(" oldest snapc is %p seq %lld (%d snaps)\n",
	     snapc, snapc->seq, snapc->num_snaps);

	spin_lock(&ci->i_ceph_lock);
	truncate_seq = ci->i_truncate_seq;
	truncate_size = ci->i_truncate_size;
	i_size = i_size_read(inode);
	spin_unlock(&ci->i_ceph_lock);

	if (last_snapc && snapc != last_snapc) {
		/* if we switched to a newer snapc, restart our scan at the
		 * start of the original file range. */
		dout(" snapc differs from last pass, restarting at %lu\n",
		     index);
		index = start;
	}
	last_snapc = snapc;

	while (!done && index <= end) {
		unsigned i;
		int first;
		pgoff_t strip_unit_end = 0;
		int num_ops = 0, op_idx;
		int pvec_pages, locked_pages = 0;
		struct page **pages = NULL, **data_pages;
		mempool_t *pool = NULL;	/* Becomes non-null if mempool used */
		struct page *page;
		u64 offset = 0, len = 0;

		max_pages = max_pages_ever;

get_more_pages:
		first = -1;
		pvec_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
						      end, PAGECACHE_TAG_DIRTY);
		dout("pagevec_lookup_range_tag got %d\n", pvec_pages);
		if (!pvec_pages && !locked_pages)
			break;
		for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
			page = pvec.pages[i];
			dout("? %p idx %lu\n", page, page->index);
			if (locked_pages == 0)
				lock_page(page);  /* first page */
			else if (!trylock_page(page))
				break;

			/* only dirty pages, or our accounting breaks */
			if (unlikely(!PageDirty(page)) ||
			    unlikely(page->mapping != mapping)) {
				dout("!dirty or !mapping %p\n", page);
				unlock_page(page);
				break;
			}
			if (strip_unit_end && (page->index > strip_unit_end)) {
				dout("end of strip unit %p\n", page);
				unlock_page(page);
				break;
			}
			if (wbc->sync_mode != WB_SYNC_NONE) {
				dout("waiting on writeback %p\n", page);
				wait_on_page_writeback(page);
			}
			if (page_offset(page) >=
			    (snap_size == -1 ? i_size : snap_size)) {
				dout("%p page eof %llu\n", page,
				     (snap_size == -1 ? i_size : snap_size));
				done = 1;
				unlock_page(page);
				break;
			}
			if (PageWriteback(page)) {
				dout("%p under writeback\n", page);
				unlock_page(page);
				break;
			}

			/* only if matching snap context */
			pgsnapc = page_snap_context(page);
			if (pgsnapc->seq > snapc->seq) {
				dout("page snapc %p %lld > oldest %p %lld\n",
				     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
				unlock_page(page);
				if (!locked_pages)
					continue; /* keep looking for snap */
				break;
			}

			if (!clear_page_dirty_for_io(page)) {
				dout("%p !clear_page_dirty_for_io\n", page);
				unlock_page(page);
				break;
			}

			/*
			 * We have something to write.  If this is
			 * the first locked page this time through,
			 * calculate max possible write size and
			 * allocate a page array
			 */
			if (locked_pages == 0) {
				u64 objnum;
				u64 objoff;

				/* prepare async write request */
				offset = (u64)page_offset(page);
				len = wsize;

				rc = ceph_calc_file_object_mapping(&ci->i_layout,
								   offset, len,
								   &objnum, &objoff,
								   &len);
				if (rc < 0) {
					unlock_page(page);
					break;
				}

				num_ops = 1 + do_sync;
				strip_unit_end = page->index +
					((len - 1) >> PAGE_SHIFT);

				BUG_ON(pages);
				max_pages = calc_pages_for(0, (u64)len);
				pages = kmalloc(max_pages * sizeof (*pages),
						GFP_NOFS);
				if (!pages) {
					pool = fsc->wb_pagevec_pool;
					pages = mempool_alloc(pool, GFP_NOFS);
					BUG_ON(!pages);
				}

				len = 0;
			} else if (page->index !=
				   (offset + len) >> PAGE_SHIFT) {
				if (num_ops >= (pool ?  CEPH_OSD_SLAB_OPS :
						CEPH_OSD_MAX_OPS)) {
					redirty_page_for_writepage(wbc, page);
					unlock_page(page);
					break;
				}

				num_ops++;
				offset = (u64)page_offset(page);
				len = 0;
			}

			/* note position of first page in pvec */
			if (first < 0)
				first = i;
			dout("%p will write page %p idx %lu\n",
			     inode, page, page->index);

			if (atomic_long_inc_return(&fsc->writeback_count) >
			    CONGESTION_ON_THRESH(
				    fsc->mount_options->congestion_kb)) {
				set_bdi_congested(&fsc->backing_dev_info,
						  BLK_RW_ASYNC);
			}

			pages[locked_pages] = page;
			locked_pages++;
			len += PAGE_SIZE;
		}

		/* did we get anything? */
		if (!locked_pages)
			goto release_pvec_pages;
		if (i) {
			int j;
			BUG_ON(!locked_pages || first < 0);

			if (pvec_pages && i == pvec_pages &&
			    locked_pages < max_pages) {
				dout("reached end pvec, trying for more\n");
				pagevec_reinit(&pvec);
				goto get_more_pages;
			}

			/* shift unused pages over in the pvec...  we
			 * will need to release them below. */
			for (j = i; j < pvec_pages; j++) {
				dout(" pvec leftover page %p\n", pvec.pages[j]);
				pvec.pages[j-i+first] = pvec.pages[j];
			}
			pvec.nr -= i-first;
		}

new_request:
		offset = page_offset(pages[0]);
		len = wsize;

		req = ceph_osdc_new_request(&fsc->client->osdc,
					    &ci->i_layout, vino,
					    offset, &len, 0, num_ops,
					    CEPH_OSD_OP_WRITE,
					    CEPH_OSD_FLAG_WRITE |
					    CEPH_OSD_FLAG_ONDISK,
					    snapc, truncate_seq,
					    truncate_size, false);
		if (IS_ERR(req)) {
			req = ceph_osdc_new_request(&fsc->client->osdc,
						    &ci->i_layout, vino,
						    offset, &len, 0,
						    min(num_ops,
							CEPH_OSD_SLAB_OPS),
						    CEPH_OSD_OP_WRITE,
						    CEPH_OSD_FLAG_WRITE |
						    CEPH_OSD_FLAG_ONDISK,
						    snapc, truncate_seq,
						    truncate_size, true);
			BUG_ON(IS_ERR(req));
		}
		BUG_ON(len < page_offset(pages[locked_pages - 1]) +
			     PAGE_SIZE - offset);

		req->r_callback = writepages_finish;
		req->r_inode = inode;

		/* Format the osd request message and submit the write */
		len = 0;
		data_pages = pages;
		op_idx = 0;
		for (i = 0; i < locked_pages; i++) {
			u64 cur_offset = page_offset(pages[i]);
			if (offset + len != cur_offset) {
				if (op_idx + do_sync + 1 == req->r_num_ops)
					break;
				osd_req_op_extent_dup_last(req, op_idx,
							   cur_offset - offset);
				dout("writepages got pages at %llu~%llu\n",
				     offset, len);
				osd_req_op_extent_osd_data_pages(req, op_idx,
							data_pages, len, 0,
							!!pool, false);
				osd_req_op_extent_update(req, op_idx, len);

				len = 0;
				offset = cur_offset;
				data_pages = pages + i;
				op_idx++;
			}

			set_page_writeback(pages[i]);
			len += PAGE_SIZE;
		}

		if (snap_size != -1) {
			len = min(len, snap_size - offset);
		} else if (i == locked_pages) {
			/* writepages_finish() clears writeback pages
			 * according to the data length, so make sure
			 * data length covers all locked pages */
			u64 min_len = len + 1 - PAGE_SIZE;
			len = min(len, (u64)i_size_read(inode) - offset);
			len = max(len, min_len);
		}
		dout("writepages got pages at %llu~%llu\n", offset, len);

		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
						 0, !!pool, false);
		osd_req_op_extent_update(req, op_idx, len);

		if (do_sync) {
			op_idx++;
			osd_req_op_init(req, op_idx, CEPH_OSD_OP_STARTSYNC, 0);
		}
		BUG_ON(op_idx + 1 != req->r_num_ops);

		pool = NULL;
		if (i < locked_pages) {
			BUG_ON(num_ops <= req->r_num_ops);
			num_ops -= req->r_num_ops;
			num_ops += do_sync;
			locked_pages -= i;

			/* allocate new pages array for next request */
			data_pages = pages;
			pages = kmalloc(locked_pages * sizeof (*pages),
					GFP_NOFS);
			if (!pages) {
				pool = fsc->wb_pagevec_pool;
				pages = mempool_alloc(pool, GFP_NOFS);
				BUG_ON(!pages);
			}
			memcpy(pages, data_pages + i,
			       locked_pages * sizeof(*pages));
			memset(data_pages + i, 0,
			       locked_pages * sizeof(*pages));
		} else {
			BUG_ON(num_ops != req->r_num_ops);
			index = pages[i - 1]->index + 1;
			/* request message now owns the pages array */
			pages = NULL;
		}

		req->r_mtime = inode->i_mtime;
		rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
		BUG_ON(rc);
		req = NULL;

		wbc->nr_to_write -= i;
		if (pages)
			goto new_request;

		if (wbc->nr_to_write <= 0)
			done = 1;

release_pvec_pages:
		dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
		     pvec.nr ? pvec.pages[0] : NULL);
		pagevec_release(&pvec);

		if (locked_pages && !done)
			goto retry;
	}

	if (should_loop && !done) {
		/* more to do; loop back to beginning of file */
		dout("writepages looping back to beginning of file\n");
		should_loop = 0;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

out:
	ceph_osdc_put_request(req);
	ceph_put_snap_context(snapc);
	dout("writepages done, rc = %d\n", rc);
	return rc;
}

/*
 * See if a given @snapc is either writeable, or already written.
 */
static int context_is_writeable_or_written(struct inode *inode,
					   struct ceph_snap_context *snapc)
{
	struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);
	int ret = !oldest || snapc->seq <= oldest->seq;

	ceph_put_snap_context(oldest);
	return ret;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 *
 * called with page locked.
 * return success with page locked,
 * or any failure (incl -EAGAIN) with page unlocked.
 */
static int ceph_update_writeable_page(struct file *file,
				      loff_t pos, unsigned len,
				      struct page *page)
{
	struct inode *inode = file_inode(file);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	loff_t page_off = pos & PAGE_MASK;
	int pos_in_page = pos & ~PAGE_MASK;
	int end_in_page = pos_in_page + len;
	loff_t i_size;
	int r;
	struct ceph_snap_context *snapc, *oldest;

	if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
		dout(" page %p forced umount\n", page);
		unlock_page(page);
		return -EIO;
	}

retry_locked:
	/* writepages currently holds page lock, but if we change that later, */
	wait_on_page_writeback(page);

	snapc = page_snap_context(page);
	if (snapc && snapc != ci->i_head_snapc) {
		/*
		 * this page is already dirty in another (older) snap
		 * context!  is it writeable now?
		 */
		oldest = get_oldest_context(inode, NULL);
		if (snapc->seq > oldest->seq) {
			ceph_put_snap_context(oldest);
			dout(" page %p snapc %p not current or oldest\n",
			     page, snapc);
			/*
			 * queue for writeback, and wait for snapc to
			 * be writeable or written
			 */
			snapc = ceph_get_snap_context(snapc);
			unlock_page(page);
			ceph_queue_writeback(inode);
			r = wait_event_killable(ci->i_cap_wq,
			       context_is_writeable_or_written(inode, snapc));
			ceph_put_snap_context(snapc);
			if (r == -ERESTARTSYS)
				return r;
			return -EAGAIN;
		}
		ceph_put_snap_context(oldest);

		/* yay, writeable, do it now (without dropping page lock) */
		dout(" page %p snapc %p not current, but oldest\n",
		     page, snapc);
		if (!clear_page_dirty_for_io(page))
			goto retry_locked;
		r = writepage_nounlock(page, NULL);
		if (r < 0)
			goto fail_unlock;
		goto retry_locked;
	}

	if (PageUptodate(page)) {
		dout(" page %p already uptodate\n", page);
		return 0;
	}

	/* full page? */
	if (pos_in_page == 0 && len == PAGE_SIZE)
		return 0;

	/* past end of file? */
	i_size = i_size_read(inode);

	if (page_off >= i_size ||
	    (pos_in_page == 0 && (pos+len) >= i_size &&
	     end_in_page - pos_in_page != PAGE_SIZE)) {
		dout(" zeroing %p 0 - %d and %d - %d\n",
		     page, pos_in_page, end_in_page, (int)PAGE_SIZE);
		zero_user_segments(page,
				   0, pos_in_page,
				   end_in_page, PAGE_SIZE);
		return 0;
	}

	/* we need to read it. */
	r = ceph_do_readpage(file, page);
	if (r < 0) {
		if (r == -EINPROGRESS)
			return -EAGAIN;
		goto fail_unlock;
	}
	goto retry_locked;
fail_unlock:
	unlock_page(page);
	return r;
}

/*
 * We are only allowed to write into/dirty the page if the page is
 * clean, or already dirty within the same snap context.
 */
static int ceph_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = file_inode(file);
	struct page *page;
	pgoff_t index = pos >> PAGE_SHIFT;
	int r;

	do {
		/* get a page */
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			return -ENOMEM;

		dout("write_begin file %p inode %p page %p %d~%d\n", file,
		     inode, page, (int)pos, (int)len);

		r = ceph_update_writeable_page(file, pos, len, page);
		if (r < 0)
			put_page(page);
		else
			*pagep = page;
	} while (r == -EAGAIN);

	return r;
}

/*
 * we don't do anything in here that simple_write_end doesn't do
 * except adjust dirty page accounting
 */
static int ceph_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = file_inode(file);
	unsigned from = pos & (PAGE_SIZE - 1);
	int check_cap = 0;

	dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
	     inode, page, (int)pos, (int)copied, (int)len);

	/* zero the stale part of the page if we did a short copy */
	if (copied < len)
		zero_user_segment(page, from+copied, len);

	/* did file size increase? */
	if (pos+copied > i_size_read(inode))
		check_cap = ceph_inode_set_size(inode, pos+copied);

	if (!PageUptodate(page))
		SetPageUptodate(page);

	set_page_dirty(page);

	unlock_page(page);
	put_page(page);

	if (check_cap)
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);

	return copied;
}

/*
 * we set .direct_IO to indicate direct io is supported, but since we
 * intercept O_DIRECT reads and writes early, this function should
 * never get called.
 */
static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter)
{
	WARN_ON(1);
	return -EINVAL;
}

const struct address_space_operations ceph_aops = {
	.readpage = ceph_readpage,
	.readpages = ceph_readpages,
	.writepage = ceph_writepage,
	.writepages = ceph_writepages_start,
	.write_begin = ceph_write_begin,
	.write_end = ceph_write_end,
	.set_page_dirty = ceph_set_page_dirty,
	.invalidatepage = ceph_invalidatepage,
	.releasepage = ceph_releasepage,
	.direct_IO = ceph_direct_io,
};
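
/*
 * Block every signal except SIGKILL, saving the old mask so that
 * ceph_restore_sigs() can put it back; this keeps fault and mkwrite
 * handling interruptible only by SIGKILL.
 */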
static void ceph_block_sigs(sigset_t *oldset)
{
	sigset_t mask;
	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void ceph_restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

/*
 * vm ops
 */
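/*
 * Fault handler: take CEPH_CAP_FILE_RD (plus CACHE/LAZYIO when we can)
 * before calling filemap_fault(); if the caps don't let us use the
 * page cache, fall back to reading the inline data by hand.
 */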
static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct page *pinned_page = NULL;
	loff_t off = vmf->pgoff << PAGE_SHIFT;
	int want, got, ret;
	sigset_t oldset;

	ceph_block_sigs(&oldset);

	dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
	     inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;

	got = 0;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		goto out_restore;

	dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
	    ci->i_inline_version == CEPH_INLINE_NONE) {
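		/* tell start_read() that we already hold cap refs */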
		current->journal_info = vma->vm_file;
		ret = filemap_fault(vma, vmf);
		current->journal_info = NULL;
	} else
		ret = -EAGAIN;

	dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n",
	     inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got), ret);
	if (pinned_page)
		put_page(pinned_page);
	ceph_put_cap_refs(ci, got);

	if (ret != -EAGAIN)
		goto out_restore;

	/* read inline data */
	if (off >= PAGE_SIZE) {
		/* does not support inline data > PAGE_SIZE */
		ret = VM_FAULT_SIGBUS;
	} else {
		int ret1;
		struct address_space *mapping = inode->i_mapping;
		struct page *page = find_or_create_page(mapping, 0,
						mapping_gfp_constraint(mapping,
						~__GFP_FS));
		if (!page) {
			ret = VM_FAULT_OOM;
			goto out_inline;
		}
		ret1 = __ceph_do_getattr(inode, page,
					 CEPH_STAT_CAP_INLINE_DATA, true);
		if (ret1 < 0 || off >= i_size_read(inode)) {
			unlock_page(page);
			put_page(page);
			if (ret1 < 0)
				ret = ret1;
			else
				ret = VM_FAULT_SIGBUS;
			goto out_inline;
		}
		if (ret1 < PAGE_SIZE)
			zero_user_segment(page, ret1, PAGE_SIZE);
		else
			flush_dcache_page(page);
		SetPageUptodate(page);
		vmf->page = page;
		ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
		dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
		     inode, off, (size_t)PAGE_SIZE, ret);
	}
out_restore:
	ceph_restore_sigs(&oldset);
	if (ret < 0)
		ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;

	return ret;
}

/*
 * Reuse write_begin here for simplicity.
 */
static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi = vma->vm_file->private_data;
	struct ceph_cap_flush *prealloc_cf;
	struct page *page = vmf->page;
	loff_t off = page_offset(page);
	loff_t size = i_size_read(inode);
	size_t len;
	int want, got, ret;
	sigset_t oldset;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return VM_FAULT_OOM;

	ceph_block_sigs(&oldset);

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		struct page *locked_page = NULL;
		if (off == 0) {
			lock_page(page);
			locked_page = page;
		}
		ret = ceph_uninline_data(vma->vm_file, locked_page);
		if (locked_page)
			unlock_page(locked_page);
		if (ret < 0)
			goto out_free;
	}

	if (off + PAGE_SIZE <= size)
		len = PAGE_SIZE;
	else
		len = size & ~PAGE_MASK;

	dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
	     inode, ceph_vinop(inode), off, len, size);
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	got = 0;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len,
			    &got, NULL);
	if (ret < 0)
		goto out_free;

	dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
	     inode, off, len, ceph_cap_string(got));

	/* Update time before taking page lock */
	file_update_time(vma->vm_file);

	do {
		lock_page(page);

		if ((off > size) || (page->mapping != inode->i_mapping)) {
			unlock_page(page);
			ret = VM_FAULT_NOPAGE;
			break;
		}

		ret = ceph_update_writeable_page(vma->vm_file, off, len, page);
		if (ret >= 0) {
			/* success.  we'll keep the page locked. */
			set_page_dirty(page);
			ret = VM_FAULT_LOCKED;
		}
	} while (ret == -EAGAIN);

	if (ret == VM_FAULT_LOCKED ||
	    ci->i_inline_version != CEPH_INLINE_NONE) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %d\n",
	     inode, off, len, ceph_cap_string(got), ret);
	ceph_put_cap_refs(ci, got);
out_free:
	ceph_restore_sigs(&oldset);
	ceph_free_cap_flush(prealloc_cf);
	if (ret < 0)
		ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
	return ret;
}
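
/*
 * Copy inline data received from the MDS into the first page of the
 * file, zero-filling the rest of that page.
 */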
void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
			   char *data, size_t len)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (locked_page) {
		page = locked_page;
	} else {
		if (i_size_read(inode) == 0)
			return;
		page = find_or_create_page(mapping, 0,
					   mapping_gfp_constraint(mapping,
					   ~__GFP_FS));
		if (!page)
			return;
		if (PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return;
		}
	}

	dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
	     inode, ceph_vinop(inode), len, locked_page);

	if (len > 0) {
		void *kaddr = kmap_atomic(page);
		memcpy(kaddr, data, len);
		kunmap_atomic(kaddr);
	}

	if (page != locked_page) {
		if (len < PAGE_SIZE)
			zero_user_segment(page, len, PAGE_SIZE);
		else
			flush_dcache_page(page);

		SetPageUptodate(page);
		unlock_page(page);
		put_page(page);
	}
}
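
/*
 * Migrate an inline-data inode to a regular RADOS object: create the
 * backing object, write out the inline data, and stamp the object's
 * "inline_version" xattr (guarded by a CMPXATTR so an older inline
 * version cannot clobber newer object data).
 */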
int ceph_uninline_data(struct file *filp, struct page *locked_page)
{
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	struct page *page = NULL;
	u64 len, inline_version;
	int err = 0;
	bool from_pagecache = false;

	spin_lock(&ci->i_ceph_lock);
	inline_version = ci->i_inline_version;
	spin_unlock(&ci->i_ceph_lock);

	dout("uninline_data %p %llx.%llx inline_version %llu\n",
	     inode, ceph_vinop(inode), inline_version);

	if (inline_version == 1 || /* initial version, no data */
	    inline_version == CEPH_INLINE_NONE)
		goto out;
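
	/*
	 * Locate the inline data: prefer the caller's locked page, then
	 * an uptodate page-cache page (if we hold CACHE/LAZYIO caps),
	 * and only as a last resort fetch it from the MDS via getattr.
	 */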
	if (locked_page) {
		page = locked_page;
		WARN_ON(!PageUptodate(page));
	} else if (ceph_caps_issued(ci) &
		   (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) {
		page = find_get_page(inode->i_mapping, 0);
		if (page) {
			if (PageUptodate(page)) {
				from_pagecache = true;
				lock_page(page);
			} else {
				put_page(page);
				page = NULL;
			}
		}
	}

	if (page) {
		len = i_size_read(inode);
		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
	} else {
		page = __page_cache_alloc(GFP_NOFS);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}
		err = __ceph_do_getattr(inode, page,
					CEPH_STAT_CAP_INLINE_DATA, true);
		if (err < 0) {
			/* no inline data */
			if (err == -ENODATA)
				err = 0;
			goto out;
		}
		len = err;
	}
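
	/* step 1: create the file's first object (a no-op if it exists) */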
	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 0, 1,
				    CEPH_OSD_OP_CREATE,
				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
				    NULL, 0, 0, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
	ceph_osdc_put_request(req);
	if (err < 0)
		goto out;
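
	/*
	 * step 2: write the data, guarded by a cmpxattr on
	 * "inline_version" so that a concurrent uninline carrying newer
	 * data is not overwritten; losing that race surfaces as
	 * -ECANCELED, which is treated as success below.
	 */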
	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
				    ceph_vino(inode), 0, &len, 1, 3,
				    CEPH_OSD_OP_WRITE,
				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
				    NULL, ci->i_truncate_seq,
				    ci->i_truncate_size, false);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	osd_req_op_extent_osd_data_pages(req, 1, &page, len, 0, false, false);

	{
		__le64 xattr_buf = cpu_to_le64(inline_version);
		err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
					    "inline_version", &xattr_buf,
					    sizeof(xattr_buf),
					    CEPH_OSD_CMPXATTR_OP_GT,
					    CEPH_OSD_CMPXATTR_MODE_U64);
		if (err)
			goto out_put;
	}

	{
		char xattr_buf[32];
		int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
					 "%llu", inline_version);
		err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
					    "inline_version",
					    xattr_buf, xattr_len, 0, 0);
		if (err)
			goto out_put;
	}

	req->r_mtime = inode->i_mtime;
	err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, req);
out_put:
	ceph_osdc_put_request(req);
	if (err == -ECANCELED)
		err = 0;
out:
	if (page && page != locked_page) {
		if (from_pagecache) {
			unlock_page(page);
			put_page(page);
		} else
			__free_pages(page, 0);
	}

	dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
	     inode, ceph_vinop(inode), inline_version, err);
	return err;
}

static const struct vm_operations_struct ceph_vmops = {
	.fault		= ceph_filemap_fault,
	.page_mkwrite	= ceph_page_mkwrite,
};
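
/*
 * mmap() is only supported when the mapping can read pages in, since
 * a fault must be able to populate the page.
 */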
int ceph_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &ceph_vmops;
	return 0;
}
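
/*
 * Pool permission checks: the MDS hands out file caps, but the OSDs
 * enforce their own pool-level access control.  Before buffered I/O
 * is allowed, probe the file's data pool to learn what this client
 * may actually do, and cache the answer in mdsc->pool_perm_tree.
 */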
enum {
	POOL_READ	= 1,
	POOL_WRITE	= 2,
};

static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
				s64 pool, struct ceph_string *pool_ns)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
	struct rb_node **p, *parent;
	struct ceph_pool_perm *perm;
	struct page **pages;
	size_t pool_ns_len;
	int err = 0, err2 = 0, have = 0;

	down_read(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	while (*p) {
		perm = rb_entry(*p, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						      perm->pool_ns,
						      perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	up_read(&mdsc->pool_perm_rwsem);
	if (*p)
		goto out;

	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
		     pool, (int)pool_ns->len, pool_ns->str);
	else
		dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);
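
	/* no cached entry; re-check under the write lock before probing */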
	down_write(&mdsc->pool_perm_rwsem);
	p = &mdsc->pool_perm_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		perm = rb_entry(parent, struct ceph_pool_perm, node);
		if (pool < perm->pool)
			p = &(*p)->rb_left;
		else if (pool > perm->pool)
			p = &(*p)->rb_right;
		else {
			int ret = ceph_compare_string(pool_ns,
						      perm->pool_ns,
						      perm->pool_ns_len);
			if (ret < 0)
				p = &(*p)->rb_left;
			else if (ret > 0)
				p = &(*p)->rb_right;
			else {
				have = perm->perm;
				break;
			}
		}
	}
	if (*p) {
		up_write(&mdsc->pool_perm_rwsem);
		goto out;
	}
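
	/*
	 * Probe the pool with two requests against the file's first
	 * object: a STAT to test read access and an exclusive CREATE to
	 * test write access.  -ENOENT and -EEXIST replies still prove
	 * the respective permission.
	 */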
	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!rd_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	rd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
	rd_req->r_base_oloc.pool = pool;
	if (pool_ns)
		rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
	ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);

	err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
					 1, false, GFP_NOFS);
	if (!wr_req) {
		err = -ENOMEM;
		goto out_unlock;
	}

	wr_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ACK;
	osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
	ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
	ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);

	err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
	if (err)
		goto out_unlock;

	/* one page should be large enough for STAT data */
	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages)) {
		err = PTR_ERR(pages);
		goto out_unlock;
	}

	osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
				     0, false, true);
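
	/* send both probes in parallel, then interpret the replies */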
	err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);

	wr_req->r_mtime = ci->vfs_inode.i_mtime;
	err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);

	if (!err)
		err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
	if (!err2)
		err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);

	if (err >= 0 || err == -ENOENT)
		have |= POOL_READ;
	else if (err != -EPERM)
		goto out_unlock;

	if (err2 == 0 || err2 == -EEXIST)
		have |= POOL_WRITE;
	else if (err2 != -EPERM) {
		err = err2;
		goto out_unlock;
	}

	pool_ns_len = pool_ns ? pool_ns->len : 0;
	perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
	if (!perm) {
		err = -ENOMEM;
		goto out_unlock;
	}

	perm->pool = pool;
	perm->perm = have;
	perm->pool_ns_len = pool_ns_len;
	if (pool_ns_len > 0)
		memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
	perm->pool_ns[pool_ns_len] = 0;

	rb_link_node(&perm->node, parent, p);
	rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
	err = 0;
out_unlock:
	up_write(&mdsc->pool_perm_rwsem);

	ceph_osdc_put_request(rd_req);
	ceph_osdc_put_request(wr_req);
out:
	if (!err)
		err = have;
	if (pool_ns)
		dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
		     pool, (int)pool_ns->len, pool_ns->str, err);
	else
		dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
	return err;
}
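
/*
 * Check the cached permission bits for the inode's current data pool,
 * probing via __ceph_pool_perm_get() on first use and re-checking if
 * the layout changed in the meantime.
 */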
int ceph_pool_perm_check(struct ceph_inode_info *ci, int need)
{
	s64 pool;
	struct ceph_string *pool_ns;
	int ret, flags;

	if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode),
				NOPOOLPERM))
		return 0;

	spin_lock(&ci->i_ceph_lock);
	flags = ci->i_ceph_flags;
	pool = ci->i_layout.pool_id;
	spin_unlock(&ci->i_ceph_lock);
check:
	if (flags & CEPH_I_POOL_PERM) {
		if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
			dout("ceph_pool_perm_check pool %lld no read perm\n",
			     pool);
			return -EPERM;
		}
		if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
			dout("ceph_pool_perm_check pool %lld no write perm\n",
			     pool);
			return -EPERM;
		}
		return 0;
	}

	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
	ret = __ceph_pool_perm_get(ci, pool, pool_ns);
	ceph_put_string(pool_ns);
	if (ret < 0)
		return ret;

	flags = CEPH_I_POOL_PERM;
	if (ret & POOL_READ)
		flags |= CEPH_I_POOL_RD;
	if (ret & POOL_WRITE)
		flags |= CEPH_I_POOL_WR;

	spin_lock(&ci->i_ceph_lock);
	if (pool == ci->i_layout.pool_id &&
	    pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
		ci->i_ceph_flags |= flags;
	} else {
		pool = ci->i_layout.pool_id;
		flags = ci->i_ceph_flags;
	}
	spin_unlock(&ci->i_ceph_lock);
	goto check;
}
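
/* empty the cached pool permissions when the mds client is torn down */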
void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
{
	struct ceph_pool_perm *perm;
	struct rb_node *n;

	while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
		n = rb_first(&mdsc->pool_perm_tree);
		perm = rb_entry(n, struct ceph_pool_perm, node);
		rb_erase(n, &mdsc->pool_perm_tree);
		kfree(perm);
	}
}