/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <[email protected]>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;

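/*
 * Set up the page vector for an I/O header. Requests that fit within
 * the embedded page_array reuse it; larger requests fall back to a
 * kcalloc()'d vector. Returns false (with npages reset to 0) if that
 * allocation fails.
 */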
static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount,
			    gfp_t gfp_flags)
{
	p->npages = pagecount;
	if (pagecount <= ARRAY_SIZE(p->page_array))
		p->pagevec = p->page_array;
	else {
		p->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
		if (!p->pagevec)
			p->npages = 0;
	}
	return p->pagevec != NULL;
}

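/*
 * Return the mirror that the pageio descriptor is currently pointed at.
 * Descriptors that do not use mirroring always use pg_mirrors[0].
 */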
struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
{
	return nfs_pgio_has_mirroring(desc) ?
		&desc->pg_mirrors[desc->pg_mirror_idx] :
		&desc->pg_mirrors[0];
}
EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);

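/*
 * Initialise an I/O header from the current state of the pageio
 * descriptor: the first queued request, the inode, credentials and
 * byte count, plus the caller's release callback and the descriptor's
 * completion ops.
 */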
void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	hdr->req = nfs_list_entry(mirror->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = mirror->pg_count;
	hdr->dreq = desc->pg_dreq;
	hdr->layout_private = desc->pg_layout_private;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);
	hdr->pgio_mirror_idx = desc->pg_mirror_idx;
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

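/*
 * Record an I/O error in the header. Only the first error, or a later
 * error at a lower offset, shortens hdr->good_bytes; the EOF flag is
 * cleared because the transfer no longer reached the end.
 */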
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	spin_lock(&hdr->lock);
	if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags)
	    || pos < hdr->io_start + hdr->good_bytes) {
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		hdr->good_bytes = pos - hdr->io_start;
		hdr->error = error;
	}
	spin_unlock(&hdr->lock);
}

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @l_ctx: nfs_lock_context with io_counter to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
{
	return wait_on_atomic_t(&l_ctx->io_count, nfs_wait_atomic_killable,
			TASK_KILLABLE);
}

/*
 * nfs_page_group_lock - lock the head of the page group
 * @req - request in group that is to be locked
 * @nonblock - if true don't block waiting for lock
 *
 * this lock must be held if modifying the page group list
 *
 * return 0 on success, < 0 on error: -EAGAIN if nonblocking or the
 * result from wait_on_bit_lock
 *
 * NOTE: calling with nonblock=false should always have set the
 * lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
 * with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
 */
int
nfs_page_group_lock(struct nfs_page *req, bool nonblock)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
		return 0;

	if (!nonblock)
		return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
				TASK_UNINTERRUPTIBLE);

	return -EAGAIN;
}

/*
 * nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it
 * @req - a request in the group
 *
 * This is a blocking call to wait for the group lock to be cleared.
 */
void
nfs_page_group_lock_wait(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	wait_on_bit(&head->wb_flags, PG_HEADLOCK,
		TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req - request in group that is to be unlocked
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	smp_mb__before_atomic();
	clear_bit(PG_HEADLOCK, &head->wb_flags);
	smp_mb__after_atomic();
	wake_up_bit(&head->wb_flags, PG_HEADLOCK);
}

/*
 * nfs_page_group_sync_on_bit_locked
 *
 * must be called with page group lock held
 */
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp;

	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

	tmp = req->wb_this_page;
	while (tmp != req) {
		if (!test_bit(bit, &tmp->wb_flags))
			return false;
		tmp = tmp->wb_this_page;
	}

	/* true! reset all bits */
	tmp = req;
	do {
		clear_bit(bit, &tmp->wb_flags);
		tmp = tmp->wb_this_page;
	} while (tmp != req);

	return true;
}

/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 *   return true if the bit is set for all requests in page group
 * @req - request in page group
 * @bit - PG_* bit that is used to sync page group
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
	bool ret;

	nfs_page_group_lock(req, false);
	ret = nfs_page_group_sync_on_bit_locked(req, bit);
	nfs_page_group_unlock(req);

	return ret;
}

/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req - a new nfs request
 * @prev - the previous request in page group, or NULL if @req is the first
 *         or only request in the group (the head).
 */
static inline void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
	struct inode *inode;
	WARN_ON_ONCE(prev == req);

	if (!prev) {
		/* a head request */
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		/* a subrequest */
		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;

		/* All subrequests take a ref on the head request until
		 * nfs_page_group_destroy is called */
		kref_get(&req->wb_head->wb_kref);

		/* grab extra ref and bump the request count if head request
		 * has extra ref from the write/commit path to handle handoff
		 * between write and commit lists. */
		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
			inode = page_file_mapping(req->wb_page)->host;
			set_bit(PG_INODE_REF, &req->wb_flags);
			kref_get(&req->wb_kref);
			spin_lock(&inode->i_lock);
			NFS_I(inode)->nrequests++;
			spin_unlock(&inode->i_lock);
		}
	}
}

/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @req - request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	struct nfs_page *tmp, *next;

	/* subrequests must release the ref on the head request */
	if (req->wb_head != req)
		nfs_release_request(req->wb_head);

	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
		return;

	tmp = req;
	do {
		next = tmp->wb_this_page;
		/* unlink and free */
		tmp->wb_this_page = tmp;
		tmp->wb_head = tmp;
		nfs_free_request(tmp);
		tmp = next;
	} while (tmp != req);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to write
 * @last: last nfs request created for this page group or NULL if head
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct page *page,
		   struct nfs_page *last, unsigned int offset,
		   unsigned int count)
{
	struct nfs_page *req;
	struct nfs_lock_context *l_ctx;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		nfs_page_free(req);
		return ERR_CAST(l_ctx);
	}
	req->wb_lock_context = l_ctx;
	atomic_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	if (page) {
		req->wb_index = page_index(page);
		get_page(page);
	}
	req->wb_offset  = offset;
	req->wb_pgbase  = offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	nfs_page_group_init(req, last);
	return req;
}

/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: pointer to request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_atomic();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_atomic();
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: pointer to request to unlock and release
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: pointer to request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		put_page(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		if (atomic_dec_and_test(&l_ctx->io_count))
			wake_up_atomic_t(&l_ctx->io_count);
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

/**
 * nfs_free_request - Tear down and free an NFS read/write request
 * @req: request to free
 *
 * The request must already have been unlinked from its page group.
 */
void nfs_free_request(struct nfs_page *req)
{
	WARN_ON_ONCE(req->wb_this_page != req);

	/* extra debug: make sure no sync bits are still set */
	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

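/**
 * nfs_release_request - Release the count on an NFS read/write request
 * @req: request to release
 *
 * Drops a reference; the page group is destroyed once the last
 * reference is gone. Should never be called with the spin lock held!
 */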
void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_page_group_destroy);
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
			      TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 * the size of the request.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *prev, struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (mirror->pg_count > mirror->pg_bsize) {
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}

	/*
	 * Limit the request size so that we can still allocate a page array
	 * for it without upsetting the slab allocator.
	 */
	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
			sizeof(struct page *) > PAGE_SIZE)
		return 0;

	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);

struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_pgio_header *hdr = ops->rw_alloc_header();

	if (hdr) {
		INIT_LIST_HEAD(&hdr->pages);
		spin_lock_init(&hdr->lock);
		hdr->rw_ops = ops;
	}
	return hdr;
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);

/**
 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 * @hdr: A header that has had nfs_generic_pgio called
 *
 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 * be called again.
 */
static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
	if (hdr->args.context)
		put_nfs_open_context(hdr->args.context);
	if (hdr->page_array.pagevec != hdr->page_array.page_array)
		kfree(hdr->page_array.pagevec);
}

/*
 * nfs_pgio_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
	nfs_pgio_data_destroy(hdr);
	hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @hdr: The pageio hdr
 * @count: Number of bytes to read
 * @offset: Initial offset
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
			      unsigned int count, unsigned int offset,
			      int how, struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = hdr->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with hdr->commit et al. */

	hdr->args.fh     = NFS_FH(hdr->inode);
	hdr->args.offset = req_offset(req) + offset;
	/* pnfs_set_layoutcommit needs this */
	hdr->mds_offset = hdr->args.offset;
	hdr->args.pgbase = req->wb_pgbase + offset;
	hdr->args.pages  = hdr->page_array.pagevec;
	hdr->args.count  = count;
	hdr->args.context = get_nfs_open_context(req->wb_context);
	hdr->args.lock_context = req->wb_lock_context;
	hdr->args.stable  = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
	default:
		hdr->args.stable = NFS_FILE_SYNC;
	}

	hdr->res.fattr   = &hdr->fattr;
	hdr->res.count   = 0;
	hdr->res.eof     = 0;
	hdr->res.verf    = &hdr->verf;
	nfs_fattr_init(&hdr->fattr);
}

/**
 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 * @task: The current task
 * @calldata: pageio header to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	int err;
	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
	if (err)
		rpc_exit(task, err);
}

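/*
 * Set up and run an RPC task for the header. The task always runs
 * asynchronously via nfsiod; for FLUSH_SYNC callers we additionally
 * wait for completion and propagate the task's status.
 */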
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
		      struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
		      const struct rpc_call_ops *call_ops, int how, int flags)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &hdr->args,
		.rpc_resp = &hdr->res,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &hdr->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = hdr,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};
	int ret = 0;

	hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);

	dprintk("NFS: initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		hdr->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(hdr->inode),
		hdr->args.count,
		(unsigned long long)hdr->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	if (how & FLUSH_SYNC) {
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);

/**
 * nfs_pgio_error - Clean up from a pageio error
 * @hdr: pageio header
 */
static void nfs_pgio_error(struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	hdr->completion_ops->completion(hdr);
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	hdr->completion_ops->completion(hdr);
}

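/*
 * Reset a single mirror to an empty state with the given block size.
 */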
static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
		unsigned int bsize)
{
	INIT_LIST_HEAD(&mirror->pg_list);
	mirror->pg_bytes_written = 0;
	mirror->pg_count = 0;
	mirror->pg_bsize = bsize;
	mirror->pg_base = 0;
	mirror->pg_recoalesce = 0;
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	struct nfs_pgio_mirror *new;
	int i;
	gfp_t gfp_flags = GFP_KERNEL;

	desc->pg_moreio = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_dreq = NULL;
	desc->pg_layout_private = NULL;
	desc->pg_bsize = bsize;

	desc->pg_mirror_count = 1;
	desc->pg_mirror_idx = 0;

	if (pg_ops->pg_get_mirror_count) {
		/* until we have a request, we don't have an lseg and no
		 * idea how many mirrors there will be */
		if (desc->pg_rw_ops->rw_mode == FMODE_WRITE)
			gfp_flags = GFP_NOIO;
		new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX,
			      sizeof(struct nfs_pgio_mirror), gfp_flags);
		desc->pg_mirrors_dynamic = new;
		desc->pg_mirrors = new;

		for (i = 0; i < NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX; i++)
			nfs_pageio_mirror_init(&desc->pg_mirrors[i], bsize);
	} else {
		desc->pg_mirrors_dynamic = NULL;
		desc->pg_mirrors = desc->pg_mirrors_static;
		nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
	}
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);

/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio header to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	struct inode *inode = hdr->inode;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__,
		task->tk_pid, task->tk_status);

	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
	else
		hdr->rw_ops->rw_result(task, hdr);
}

/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
		     struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page		*req;
	struct page		**pages,
				*last_page;
	struct list_head *head = &mirror->pg_list;
	struct nfs_commit_info cinfo;
	unsigned int pagecount, pageused;
	gfp_t gfp_flags = GFP_KERNEL;

	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
	if (desc->pg_rw_ops->rw_mode == FMODE_WRITE)
		gfp_flags = GFP_NOIO;
	if (!nfs_pgarray_set(&hdr->page_array, pagecount, gfp_flags)) {
		nfs_pgio_error(hdr);
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = hdr->page_array.pagevec;
	last_page = NULL;
	pageused = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &hdr->pages);

		if (!last_page || last_page != req->wb_page) {
			pageused++;
			if (pageused > pagecount)
				break;
			*pages++ = last_page = req->wb_page;
		}
	}
	if (WARN_ON_ONCE(pageused != pagecount)) {
		nfs_pgio_error(hdr);
		desc->pg_error = -EINVAL;
		return desc->pg_error;
	}

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_pgio_rpcsetup(hdr, mirror->pg_count, 0, desc->pg_ioflags, &cinfo);
	desc->pg_rpc_callops = &nfs_pgio_common_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);

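/*
 * Allocate an I/O header for the current mirror, move its queued
 * requests into the header, and kick off the RPC call. This is the
 * generic pg_doio implementation referenced by nfs_pgio_rw_ops at the
 * bottom of this file.
 */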
static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret == 0)
		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
					hdr,
					hdr->cred,
					NFS_PROTO(hdr->inode),
					desc->pg_rpc_callops,
					desc->pg_ioflags, 0);
	return ret;
}

/*
 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
 *				by calling the pg_get_mirror_count op
 */
static int nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
				      struct nfs_page *req)
{
	int mirror_count = 1;

	if (!pgio->pg_ops->pg_get_mirror_count)
		return 0;

	mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
	if (pgio->pg_error < 0)
		return pgio->pg_error;

	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX)
		return -EINVAL;

	if (WARN_ON_ONCE(!pgio->pg_mirrors_dynamic))
		return -EINVAL;

	pgio->pg_mirror_count = mirror_count;
	return 0;
}

/*
 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
 */
void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_mirror_count = 1;
	pgio->pg_mirror_idx = 0;
}

static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_mirror_count = 1;
	pgio->pg_mirror_idx = 0;
	pgio->pg_mirrors = pgio->pg_mirrors_static;
	kfree(pgio->pg_mirrors_dynamic);
	pgio->pg_mirrors_dynamic = NULL;
}

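/*
 * Two lock contexts match when their owner and pid are equal; matching
 * contexts are treated as coalescable by nfs_can_coalesce_requests()
 * below.
 */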
static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
		const struct nfs_lock_context *l2)
{
	return l1->lockowner.l_owner == l2->lockowner.l_owner
		&& l1->lockowner.l_pid == l2->lockowner.l_pid;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pageio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	size_t size;
	struct file_lock_context *flctx;

	if (prev) {
		if (!nfs_match_open_context(req->wb_context, prev->wb_context))
			return false;
		flctx = d_inode(req->wb_context->dentry)->i_flctx;
		if (flctx != NULL &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock)) &&
		    !nfs_match_lock_context(req->wb_lock_context,
					    prev->wb_lock_context))
			return false;
		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
			return false;
		if (req->wb_page == prev->wb_page) {
			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
				return false;
		} else {
			if (req->wb_pgbase != 0 ||
			    prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
				return false;
		}
	}
	size = pgio->pg_ops->pg_test(pgio, prev, req);
	WARN_ON_ONCE(size > req->wb_bytes);
	if (size && size < req->wb_bytes)
		req->wb_bytes = size;
	return size > 0;
}

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *prev = NULL;

	if (mirror->pg_count != 0) {
		prev = nfs_list_entry(mirror->pg_list.prev);
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		if (desc->pg_error < 0)
			return 0;
		mirror->pg_base = req->wb_pgbase;
	}
	if (!nfs_can_coalesce_requests(prev, req, desc))
		return 0;
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &mirror->pg_list);
	mirror->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!list_empty(&mirror->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			mirror->pg_bytes_written += mirror->pg_count;
	}
	if (list_empty(&mirror->pg_list)) {
		mirror->pg_count = 0;
		mirror->pg_base = 0;
	}
}

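/*
 * Hand a single request that could not be queued back to the
 * completion ops' error_cleanup callback for unlocking and release.
 */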
static void
nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	LIST_HEAD(head);

	nfs_list_remove_request(req);
	nfs_list_add_request(req, &head);
	desc->pg_completion_ops->error_cleanup(&head);
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *subreq;
	unsigned int bytes_left = 0;
	unsigned int offset, pgbase;

	nfs_page_group_lock(req, false);

	subreq = req;
	bytes_left = subreq->wb_bytes;
	offset = subreq->wb_offset;
	pgbase = subreq->wb_pgbase;

	do {
		if (!nfs_pageio_do_add_request(desc, subreq)) {
			/* make sure pg_test call(s) did nothing */
			WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
			WARN_ON_ONCE(subreq->wb_offset != offset);
			WARN_ON_ONCE(subreq->wb_pgbase != pgbase);

			nfs_page_group_unlock(req);
			desc->pg_moreio = 1;
			nfs_pageio_doio(desc);
			if (desc->pg_error < 0 || mirror->pg_recoalesce)
				goto out_cleanup_subreq;
			/* retry add_request for this subreq */
			nfs_page_group_lock(req, false);
			continue;
		}

		/* check for buggy pg_test call(s) */
		WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
		WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
		WARN_ON_ONCE(subreq->wb_bytes == 0);

		bytes_left -= subreq->wb_bytes;
		offset += subreq->wb_bytes;
		pgbase += subreq->wb_bytes;

		if (bytes_left) {
			subreq = nfs_create_request(req->wb_context,
					req->wb_page,
					subreq, pgbase, bytes_left);
			if (IS_ERR(subreq))
				goto err_ptr;
			nfs_lock_request(subreq);
			subreq->wb_offset = offset;
			subreq->wb_index = req->wb_index;
		}
	} while (bytes_left > 0);

	nfs_page_group_unlock(req);
	return 1;
err_ptr:
	desc->pg_error = PTR_ERR(subreq);
	nfs_page_group_unlock(req);
	return 0;
out_cleanup_subreq:
	if (req != subreq)
		nfs_pageio_cleanup_request(desc, subreq);
	return 0;
}

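/*
 * Drain the current mirror's page list and feed each request back
 * through __nfs_pageio_add_request, looping for as long as
 * pg_recoalesce keeps being set. Returns 0 only if a descriptor error
 * forced the remaining requests back onto pg_list.
 */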
static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	LIST_HEAD(head);

	do {
		list_splice_init(&mirror->pg_list, &head);
		mirror->pg_bytes_written -= mirror->pg_count;
		mirror->pg_count = 0;
		mirror->pg_base = 0;
		mirror->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0) {
				list_splice_tail(&head, &mirror->pg_list);
				mirror->pg_recoalesce = 1;
				return 0;
			}
			break;
		}
	} while (mirror->pg_recoalesce);
	return 1;
}

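/*
 * Add a request to the current mirror, recoalescing and retrying for
 * as long as progress is made and no descriptor error has been set.
 */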
static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);

	return ret;
}

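/*
 * Add a request to the descriptor. When mirroring is in use, the
 * request is duplicated once per extra mirror (the copies join the
 * same page group) and each copy is queued on its own mirror.
 */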
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	u32 midx;
	unsigned int pgbase, offset, bytes;
	struct nfs_page *dupreq, *lastreq;

	pgbase = req->wb_pgbase;
	offset = req->wb_offset;
	bytes = req->wb_bytes;

	nfs_pageio_setup_mirroring(desc, req);
	if (desc->pg_error < 0)
		goto out_failed;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		if (midx) {
			nfs_page_group_lock(req, false);

			/* find the last request */
			for (lastreq = req->wb_head;
			     lastreq->wb_this_page != req->wb_head;
			     lastreq = lastreq->wb_this_page)
				;

			dupreq = nfs_create_request(req->wb_context,
					req->wb_page, lastreq, pgbase, bytes);

			if (IS_ERR(dupreq)) {
				nfs_page_group_unlock(req);
				desc->pg_error = PTR_ERR(dupreq);
				goto out_failed;
			}

			nfs_lock_request(dupreq);
			nfs_page_group_unlock(req);
			dupreq->wb_offset = offset;
			dupreq->wb_index = req->wb_index;
		} else
			dupreq = req;

		if (nfs_pgio_has_mirroring(desc))
			desc->pg_mirror_idx = midx;
		if (!nfs_pageio_add_request_mirror(desc, dupreq))
			goto out_cleanup_subreq;
	}

	return 1;

out_cleanup_subreq:
	if (req != dupreq)
		nfs_pageio_cleanup_request(desc, dupreq);
out_failed:
	/*
	 * We might have failed before sending any reqs over wire.
	 * Clean up rest of the reqs in mirror pg_list.
	 */
	if (desc->pg_error) {
		struct nfs_pgio_mirror *mirror;
		void (*func)(struct list_head *);

		/* remember fatal errors */
		if (nfs_error_is_fatal(desc->pg_error))
			mapping_set_error(desc->pg_inode->i_mapping,
					  desc->pg_error);

		func = desc->pg_completion_ops->error_cleanup;
		for (midx = 0; midx < desc->pg_mirror_count; midx++) {
			mirror = &desc->pg_mirrors[midx];
			func(&mirror->pg_list);
		}
	}
	return 0;
}

/*
 * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
 *				nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 * @mirror_idx: index of the mirror to complete
 */
static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
		       u32 mirror_idx)
{
	struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx];
	u32 restore_idx = desc->pg_mirror_idx;

	if (nfs_pgio_has_mirroring(desc))
		desc->pg_mirror_idx = mirror_idx;
	for (;;) {
		nfs_pageio_doio(desc);
		if (desc->pg_error < 0 || !mirror->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
	desc->pg_mirror_idx = restore_idx;
}

/*
 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 * @hdr - the pgio header to move request from
 * @desc - the pageio descriptor to add requests to
 *
 * Try to move each request (nfs_page) from @hdr to @desc then attempt
 * to send them.
 *
 * Returns 0 on success and < 0 on error.
 */
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	LIST_HEAD(failed);

	desc->pg_dreq = hdr->dreq;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(desc, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(desc);
	if (!list_empty(&failed)) {
		list_splice(&failed, &hdr->pages);
		return desc->pg_error < 0 ? desc->pg_error : -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_pageio_resend);

/**
 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++)
		nfs_pageio_complete_mirror(desc, midx);

	if (desc->pg_ops->pg_cleanup)
		desc->pg_ops->pg_cleanup(desc);
	nfs_pageio_cleanup_mirroring(desc);
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	struct nfs_pgio_mirror *mirror;
	struct nfs_page *prev;
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = &desc->pg_mirrors[midx];
		if (!list_empty(&mirror->pg_list)) {
			prev = nfs_list_entry(mirror->pg_list.prev);
			if (index != prev->wb_index + 1) {
				nfs_pageio_complete(desc);
				break;
			}
		}
	}
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}

static const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};

const struct nfs_pageio_ops nfs_pgio_rw_ops = {
	.pg_test = nfs_generic_pg_test,
	.pg_doio = nfs_generic_pg_pgios,
};