- /*
- * Copyright (C) 2007 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License v2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
- */
- #include <linux/fs.h>
- #include <linux/pagemap.h>
- #include <linux/highmem.h>
- #include <linux/time.h>
- #include <linux/init.h>
- #include <linux/string.h>
- #include <linux/backing-dev.h>
- #include <linux/mpage.h>
- #include <linux/falloc.h>
- #include <linux/swap.h>
- #include <linux/writeback.h>
- #include <linux/statfs.h>
- #include <linux/compat.h>
- #include <linux/slab.h>
- #include <linux/btrfs.h>
- #include <linux/uio.h>
- #include "ctree.h"
- #include "disk-io.h"
- #include "transaction.h"
- #include "btrfs_inode.h"
- #include "print-tree.h"
- #include "tree-log.h"
- #include "locking.h"
- #include "volumes.h"
- #include "qgroup.h"
- #include "compression.h"
- static struct kmem_cache *btrfs_inode_defrag_cachep;
- /*
- * When auto defrag is enabled we queue up these defrag structs
- * to remember which inodes need defragging passes.
- */
- struct inode_defrag {
- struct rb_node rb_node;
- /* objectid */
- u64 ino;
- /*
- * Transid at which the defrag was added; we search for
- * extents newer than this.
- */
- u64 transid;
- /* root objectid */
- u64 root;
- /* last offset we were able to defrag */
- u64 last_offset;
- /* if we've wrapped around back to zero once already */
- int cycled;
- };
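- /*
- * Editor's note: defrag records sort by root id first, then inode
- * number, e.g. (root=5, ino=258) < (root=5, ino=260) <
- * (root=7, ino=256) (hypothetical values).
- */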
- static int __compare_inode_defrag(struct inode_defrag *defrag1,
- struct inode_defrag *defrag2)
- {
- if (defrag1->root > defrag2->root)
- return 1;
- else if (defrag1->root < defrag2->root)
- return -1;
- else if (defrag1->ino > defrag2->ino)
- return 1;
- else if (defrag1->ino < defrag2->ino)
- return -1;
- else
- return 0;
- }
- /* Insert a record for an inode into the defrag tree. The lock
- * must be held already.
- *
- * If you're inserting a record for an older transid than an
- * existing record, the transid already in the tree is lowered.
- *
- * If an existing record is found, the defrag item you
- * pass in is freed.
- */
- static int __btrfs_add_inode_defrag(struct inode *inode,
- struct inode_defrag *defrag)
- {
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct inode_defrag *entry;
- struct rb_node **p;
- struct rb_node *parent = NULL;
- int ret;
- p = &root->fs_info->defrag_inodes.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct inode_defrag, rb_node);
- ret = __compare_inode_defrag(defrag, entry);
- if (ret < 0)
- p = &parent->rb_left;
- else if (ret > 0)
- p = &parent->rb_right;
- else {
- /* if we're reinserting an entry for
- * an old defrag run, make sure to
- * lower the transid of our existing record
- */
- if (defrag->transid < entry->transid)
- entry->transid = defrag->transid;
- if (defrag->last_offset > entry->last_offset)
- entry->last_offset = defrag->last_offset;
- return -EEXIST;
- }
- }
- set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
- rb_link_node(&defrag->rb_node, parent, p);
- rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
- return 0;
- }
- static inline int __need_auto_defrag(struct btrfs_root *root)
- {
- if (!btrfs_test_opt(root->fs_info, AUTO_DEFRAG))
- return 0;
- if (btrfs_fs_closing(root->fs_info))
- return 0;
- return 1;
- }
- /*
- * insert a defrag record for this inode if auto defrag is
- * enabled
- */
- int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
- struct inode *inode)
- {
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct inode_defrag *defrag;
- u64 transid;
- int ret;
- if (!__need_auto_defrag(root))
- return 0;
- if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
- return 0;
- if (trans)
- transid = trans->transid;
- else
- transid = BTRFS_I(inode)->root->last_trans;
- defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
- if (!defrag)
- return -ENOMEM;
- defrag->ino = btrfs_ino(inode);
- defrag->transid = transid;
- defrag->root = root->root_key.objectid;
- spin_lock(&root->fs_info->defrag_inodes_lock);
- if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
- /*
- * If we set the IN_DEFRAG flag and the inode is then evicted
- * and re-read, the new in-memory inode won't have the
- * IN_DEFRAG flag set. In that case we may still find an
- * existing defrag record in the tree.
- */
- ret = __btrfs_add_inode_defrag(inode, defrag);
- if (ret)
- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
- } else {
- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
- }
- spin_unlock(&root->fs_info->defrag_inodes_lock);
- return 0;
- }
- /*
- * Requeue the defrag object. If there is a defrag object that points to
- * the same inode in the tree, we will merge them together (by
- * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
- */
- static void btrfs_requeue_inode_defrag(struct inode *inode,
- struct inode_defrag *defrag)
- {
- struct btrfs_root *root = BTRFS_I(inode)->root;
- int ret;
- if (!__need_auto_defrag(root))
- goto out;
- /*
- * Here we don't check the IN_DEFRAG flag, because we need to
- * merge the records together.
- */
- spin_lock(&root->fs_info->defrag_inodes_lock);
- ret = __btrfs_add_inode_defrag(inode, defrag);
- spin_unlock(&root->fs_info->defrag_inodes_lock);
- if (ret)
- goto out;
- return;
- out:
- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
- }
- /*
- * Pick the defraggable inode that we want; if it doesn't exist, we
- * will get the next one.
- */
- static struct inode_defrag *
- btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
- {
- struct inode_defrag *entry = NULL;
- struct inode_defrag tmp;
- struct rb_node *p;
- struct rb_node *parent = NULL;
- int ret;
- tmp.ino = ino;
- tmp.root = root;
- spin_lock(&fs_info->defrag_inodes_lock);
- p = fs_info->defrag_inodes.rb_node;
- while (p) {
- parent = p;
- entry = rb_entry(parent, struct inode_defrag, rb_node);
- ret = __compare_inode_defrag(&tmp, entry);
- if (ret < 0)
- p = parent->rb_left;
- else if (ret > 0)
- p = parent->rb_right;
- else
- goto out;
- }
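- /*
- * Editor's note: if the exact (root, ino) key was not found, entry
- * is the last node visited; when the requested key sorts after it,
- * step forward so we hand back the next record in (root, ino) order.
- */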
- if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
- parent = rb_next(parent);
- if (parent)
- entry = rb_entry(parent, struct inode_defrag, rb_node);
- else
- entry = NULL;
- }
- out:
- if (entry)
- rb_erase(parent, &fs_info->defrag_inodes);
- spin_unlock(&fs_info->defrag_inodes_lock);
- return entry;
- }
- void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
- {
- struct inode_defrag *defrag;
- struct rb_node *node;
- spin_lock(&fs_info->defrag_inodes_lock);
- node = rb_first(&fs_info->defrag_inodes);
- while (node) {
- rb_erase(node, &fs_info->defrag_inodes);
- defrag = rb_entry(node, struct inode_defrag, rb_node);
- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
- cond_resched_lock(&fs_info->defrag_inodes_lock);
- node = rb_first(&fs_info->defrag_inodes);
- }
- spin_unlock(&fs_info->defrag_inodes_lock);
- }
- #define BTRFS_DEFRAG_BATCH 1024
- static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
- struct inode_defrag *defrag)
- {
- struct btrfs_root *inode_root;
- struct inode *inode;
- struct btrfs_key key;
- struct btrfs_ioctl_defrag_range_args range;
- int num_defrag;
- int index;
- int ret;
- /* get the inode */
- key.objectid = defrag->root;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = (u64)-1;
- index = srcu_read_lock(&fs_info->subvol_srcu);
- inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
- if (IS_ERR(inode_root)) {
- ret = PTR_ERR(inode_root);
- goto cleanup;
- }
- key.objectid = defrag->ino;
- key.type = BTRFS_INODE_ITEM_KEY;
- key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
- if (IS_ERR(inode)) {
- ret = PTR_ERR(inode);
- goto cleanup;
- }
- srcu_read_unlock(&fs_info->subvol_srcu, index);
- /* do a chunk of defrag */
- clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
- memset(&range, 0, sizeof(range));
- range.len = (u64)-1;
- range.start = defrag->last_offset;
- sb_start_write(fs_info->sb);
- num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
- BTRFS_DEFRAG_BATCH);
- sb_end_write(fs_info->sb);
- /*
- * if we filled the whole defrag batch, there
- * must be more work to do. Queue this defrag
- * again
- */
- if (num_defrag == BTRFS_DEFRAG_BATCH) {
- defrag->last_offset = range.start;
- btrfs_requeue_inode_defrag(inode, defrag);
- } else if (defrag->last_offset && !defrag->cycled) {
- /*
- * we didn't fill our defrag batch, but
- * we didn't start at zero. Make sure we loop
- * around to the start of the file.
- */
- defrag->last_offset = 0;
- defrag->cycled = 1;
- btrfs_requeue_inode_defrag(inode, defrag);
- } else {
- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
- }
- iput(inode);
- return 0;
- cleanup:
- srcu_read_unlock(&fs_info->subvol_srcu, index);
- kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
- return ret;
- }
- /*
- * run through the list of inodes in the FS that need
- * defragging
- */
- int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
- {
- struct inode_defrag *defrag;
- u64 first_ino = 0;
- u64 root_objectid = 0;
- atomic_inc(&fs_info->defrag_running);
- while (1) {
- /* Pause the auto defragger. */
- if (test_bit(BTRFS_FS_STATE_REMOUNTING,
- &fs_info->fs_state))
- break;
- if (!__need_auto_defrag(fs_info->tree_root))
- break;
- /* find an inode to defrag */
- defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
- first_ino);
- if (!defrag) {
- if (root_objectid || first_ino) {
- root_objectid = 0;
- first_ino = 0;
- continue;
- } else {
- break;
- }
- }
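- /*
- * Editor's note: advance the (root, ino) cursor past the record we
- * just picked so the next lookup continues from here instead of
- * rescanning from the start of the tree.
- */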
- first_ino = defrag->ino + 1;
- root_objectid = defrag->root;
- __btrfs_run_defrag_inode(fs_info, defrag);
- }
- atomic_dec(&fs_info->defrag_running);
- /*
- * during unmount, we use the transaction_wait queue to
- * wait for the defragger to stop
- */
- wake_up(&fs_info->transaction_wait);
- return 0;
- }
- /* simple helper to fault in pages and copy. This should go away
- * and be replaced with calls into generic code.
- */
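- /*
- * Worked example (editor's addition, hypothetical values): with
- * PAGE_SIZE == 4096, a write at pos == 5000 gives offset == 904, so
- * at most 4096 - 904 == 3192 bytes are copied into page 0 before
- * moving on to page 1 with offset == 0.
- */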
- static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
- struct page **prepared_pages,
- struct iov_iter *i)
- {
- size_t copied = 0;
- size_t total_copied = 0;
- int pg = 0;
- int offset = pos & (PAGE_SIZE - 1);
- while (write_bytes > 0) {
- size_t count = min_t(size_t,
- PAGE_SIZE - offset, write_bytes);
- struct page *page = prepared_pages[pg];
- /*
- * Copy data from userspace to the current page
- */
- copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
- /* Flush processor's dcache for this page */
- flush_dcache_page(page);
- /*
- * if we get a partial write, we can end up with
- * partially up to date pages. These add
- * a lot of complexity, so make sure they don't
- * happen by forcing this copy to be retried.
- *
- * The rest of the btrfs_file_write code will fall
- * back to page at a time copies after we return 0.
- */
- if (!PageUptodate(page) && copied < count)
- copied = 0;
- iov_iter_advance(i, copied);
- write_bytes -= copied;
- total_copied += copied;
- /* Return to btrfs_file_write_iter to fault page */
- if (unlikely(copied == 0))
- break;
- if (copied < PAGE_SIZE - offset) {
- offset += copied;
- } else {
- pg++;
- offset = 0;
- }
- }
- return total_copied;
- }
- /*
- * unlocks pages after btrfs_file_write is done with them
- */
- static void btrfs_drop_pages(struct page **pages, size_t num_pages)
- {
- size_t i;
- for (i = 0; i < num_pages; i++) {
- /* PageChecked is some magic around finding pages that
- * have been modified without going through btrfs_set_page_dirty;
- * clear it here. There should be no need to mark the pages
- * accessed, as prepare_pages should already have marked them
- * accessed via find_or_create_page().
- */
- ClearPageChecked(pages[i]);
- unlock_page(pages[i]);
- put_page(pages[i]);
- }
- }
- /*
- * after copy_from_user, pages need to be dirtied and we need to make
- * sure holes are created between the current EOF and the start of
- * any next extents (if required).
- *
- * this also makes the decision about creating an inline extent vs
- * doing real data extents, marking pages dirty and delalloc as required.
- */
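- /*
- * Worked example (editor's addition, hypothetical values): with a
- * 4096 byte sectorsize, a 100 byte write at pos == 6000 gives
- * start_pos == 4096 and num_bytes == round_up(100 + 6000 - 4096,
- * 4096) == 4096, so delalloc is marked on whole sectors.
- */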
- int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
- struct page **pages, size_t num_pages,
- loff_t pos, size_t write_bytes,
- struct extent_state **cached)
- {
- int err = 0;
- int i;
- u64 num_bytes;
- u64 start_pos;
- u64 end_of_last_block;
- u64 end_pos = pos + write_bytes;
- loff_t isize = i_size_read(inode);
- start_pos = pos & ~((u64)root->sectorsize - 1);
- num_bytes = round_up(write_bytes + pos - start_pos, root->sectorsize);
- end_of_last_block = start_pos + num_bytes - 1;
- err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
- cached, 0);
- if (err)
- return err;
- for (i = 0; i < num_pages; i++) {
- struct page *p = pages[i];
- SetPageUptodate(p);
- ClearPageChecked(p);
- set_page_dirty(p);
- }
- /*
- * We've only changed i_size in RAM, and we haven't updated
- * the on-disk i_size. There is no need to log the inode
- * at this time.
- */
- if (end_pos > isize)
- i_size_write(inode, end_pos);
- return 0;
- }
- /*
- * this drops all the extents in the cache that intersect the range
- * [start, end]. Existing extents are split as required.
- */
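- /*
- * Editor's sketch of the split cases below:
- *
- *              | --- range to drop --- |
- *   | ------------- extent map (em) ------------- |
- *   | split |                            | split2 |
- *
- * em is removed from the tree and the surviving front/back pieces
- * are re-inserted as split and split2.
- */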
- void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
- int skip_pinned)
- {
- struct extent_map *em;
- struct extent_map *split = NULL;
- struct extent_map *split2 = NULL;
- struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
- u64 len = end - start + 1;
- u64 gen;
- int ret;
- int testend = 1;
- unsigned long flags;
- int compressed = 0;
- bool modified;
- WARN_ON(end < start);
- if (end == (u64)-1) {
- len = (u64)-1;
- testend = 0;
- }
- while (1) {
- int no_splits = 0;
- modified = false;
- if (!split)
- split = alloc_extent_map();
- if (!split2)
- split2 = alloc_extent_map();
- if (!split || !split2)
- no_splits = 1;
- write_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, start, len);
- if (!em) {
- write_unlock(&em_tree->lock);
- break;
- }
- flags = em->flags;
- gen = em->generation;
- if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
- if (testend && em->start + em->len >= start + len) {
- free_extent_map(em);
- write_unlock(&em_tree->lock);
- break;
- }
- start = em->start + em->len;
- if (testend)
- len = start + len - (em->start + em->len);
- free_extent_map(em);
- write_unlock(&em_tree->lock);
- continue;
- }
- compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
- clear_bit(EXTENT_FLAG_PINNED, &em->flags);
- clear_bit(EXTENT_FLAG_LOGGING, &flags);
- modified = !list_empty(&em->list);
- if (no_splits)
- goto next;
- if (em->start < start) {
- split->start = em->start;
- split->len = start - em->start;
- if (em->block_start < EXTENT_MAP_LAST_BYTE) {
- split->orig_start = em->orig_start;
- split->block_start = em->block_start;
- if (compressed)
- split->block_len = em->block_len;
- else
- split->block_len = split->len;
- split->orig_block_len = max(split->block_len,
- em->orig_block_len);
- split->ram_bytes = em->ram_bytes;
- } else {
- split->orig_start = split->start;
- split->block_len = 0;
- split->block_start = em->block_start;
- split->orig_block_len = 0;
- split->ram_bytes = split->len;
- }
- split->generation = gen;
- split->bdev = em->bdev;
- split->flags = flags;
- split->compress_type = em->compress_type;
- replace_extent_mapping(em_tree, em, split, modified);
- free_extent_map(split);
- split = split2;
- split2 = NULL;
- }
- if (testend && em->start + em->len > start + len) {
- u64 diff = start + len - em->start;
- split->start = start + len;
- split->len = em->start + em->len - (start + len);
- split->bdev = em->bdev;
- split->flags = flags;
- split->compress_type = em->compress_type;
- split->generation = gen;
- if (em->block_start < EXTENT_MAP_LAST_BYTE) {
- split->orig_block_len = max(em->block_len,
- em->orig_block_len);
- split->ram_bytes = em->ram_bytes;
- if (compressed) {
- split->block_len = em->block_len;
- split->block_start = em->block_start;
- split->orig_start = em->orig_start;
- } else {
- split->block_len = split->len;
- split->block_start = em->block_start
- + diff;
- split->orig_start = em->orig_start;
- }
- } else {
- split->ram_bytes = split->len;
- split->orig_start = split->start;
- split->block_len = 0;
- split->block_start = em->block_start;
- split->orig_block_len = 0;
- }
- if (extent_map_in_tree(em)) {
- replace_extent_mapping(em_tree, em, split,
- modified);
- } else {
- ret = add_extent_mapping(em_tree, split,
- modified);
- ASSERT(ret == 0); /* Logic error */
- }
- free_extent_map(split);
- split = NULL;
- }
- next:
- if (extent_map_in_tree(em))
- remove_extent_mapping(em_tree, em);
- write_unlock(&em_tree->lock);
- /* once for us */
- free_extent_map(em);
- /* once for the tree */
- free_extent_map(em);
- }
- if (split)
- free_extent_map(split);
- if (split2)
- free_extent_map(split2);
- }
- /*
- * This is very complex, but the basic idea is to drop all extents
- * in the range start - end.
- *
- * If an extent intersects the range but is not entirely inside the range
- * it is either truncated or split. Anything entirely inside the range
- * is deleted from the tree.
- */
- int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode,
- struct btrfs_path *path, u64 start, u64 end,
- u64 *drop_end, int drop_cache,
- int replace_extent,
- u32 extent_item_size,
- int *key_inserted)
- {
- struct extent_buffer *leaf;
- struct btrfs_file_extent_item *fi;
- struct btrfs_key key;
- struct btrfs_key new_key;
- u64 ino = btrfs_ino(inode);
- u64 search_start = start;
- u64 disk_bytenr = 0;
- u64 num_bytes = 0;
- u64 extent_offset = 0;
- u64 extent_end = 0;
- int del_nr = 0;
- int del_slot = 0;
- int extent_type;
- int recow;
- int ret;
- int modify_tree = -1;
- int update_refs;
- int found = 0;
- int leafs_visited = 0;
- if (drop_cache)
- btrfs_drop_extent_cache(inode, start, end - 1, 0);
- if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
- modify_tree = 0;
- update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
- root == root->fs_info->tree_root);
- while (1) {
- recow = 0;
- ret = btrfs_lookup_file_extent(trans, root, path, ino,
- search_start, modify_tree);
- if (ret < 0)
- break;
- if (ret > 0 && path->slots[0] > 0 && search_start == start) {
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
- if (key.objectid == ino &&
- key.type == BTRFS_EXTENT_DATA_KEY)
- path->slots[0]--;
- }
- ret = 0;
- leafs_visited++;
- next_slot:
- leaf = path->nodes[0];
- if (path->slots[0] >= btrfs_header_nritems(leaf)) {
- BUG_ON(del_nr > 0);
- ret = btrfs_next_leaf(root, path);
- if (ret < 0)
- break;
- if (ret > 0) {
- ret = 0;
- break;
- }
- leafs_visited++;
- leaf = path->nodes[0];
- recow = 1;
- }
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- if (key.objectid > ino)
- break;
- if (WARN_ON_ONCE(key.objectid < ino) ||
- key.type < BTRFS_EXTENT_DATA_KEY) {
- ASSERT(del_nr == 0);
- path->slots[0]++;
- goto next_slot;
- }
- if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
- break;
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- extent_type = btrfs_file_extent_type(leaf, fi);
- if (extent_type == BTRFS_FILE_EXTENT_REG ||
- extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
- disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
- num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
- extent_offset = btrfs_file_extent_offset(leaf, fi);
- extent_end = key.offset +
- btrfs_file_extent_num_bytes(leaf, fi);
- } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
- extent_end = key.offset +
- btrfs_file_extent_inline_len(leaf,
- path->slots[0], fi);
- } else {
- /* can't happen */
- BUG();
- }
- /*
- * Don't skip extent items representing 0 byte lengths. They
- * used to be created (by a bug) when we hit an -ENOSPC
- * condition while punching holes. So if we find one here, just
- * ensure we delete it, otherwise we would insert a new file
- * extent item with the same key (offset) as that 0 byte length
- * file extent item in the call to setup_items_for_insert()
- * later in this function.
- */
- if (extent_end == key.offset && extent_end >= search_start)
- goto delete_extent_item;
- if (extent_end <= search_start) {
- path->slots[0]++;
- goto next_slot;
- }
- found = 1;
- search_start = max(key.offset, start);
- if (recow || !modify_tree) {
- modify_tree = -1;
- btrfs_release_path(path);
- continue;
- }
- /*
- * | - range to drop - |
- * | -------- extent -------- |
- */
- if (start > key.offset && end < extent_end) {
- BUG_ON(del_nr > 0);
- if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
- ret = -EOPNOTSUPP;
- break;
- }
- memcpy(&new_key, &key, sizeof(new_key));
- new_key.offset = start;
- ret = btrfs_duplicate_item(trans, root, path,
- &new_key);
- if (ret == -EAGAIN) {
- btrfs_release_path(path);
- continue;
- }
- if (ret < 0)
- break;
- leaf = path->nodes[0];
- fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
- struct btrfs_file_extent_item);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- start - key.offset);
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- extent_offset += start - key.offset;
- btrfs_set_file_extent_offset(leaf, fi, extent_offset);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_end - start);
- btrfs_mark_buffer_dirty(leaf);
- if (update_refs && disk_bytenr > 0) {
- ret = btrfs_inc_extent_ref(trans, root,
- disk_bytenr, num_bytes, 0,
- root->root_key.objectid,
- new_key.objectid,
- start - extent_offset);
- BUG_ON(ret); /* -ENOMEM */
- }
- key.offset = start;
- }
- /*
- * | ---- range to drop ----- |
- * | -------- extent -------- |
- */
- if (start <= key.offset && end < extent_end) {
- if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
- ret = -EOPNOTSUPP;
- break;
- }
- memcpy(&new_key, &key, sizeof(new_key));
- new_key.offset = end;
- btrfs_set_item_key_safe(root->fs_info, path, &new_key);
- extent_offset += end - key.offset;
- btrfs_set_file_extent_offset(leaf, fi, extent_offset);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_end - end);
- btrfs_mark_buffer_dirty(leaf);
- if (update_refs && disk_bytenr > 0)
- inode_sub_bytes(inode, end - key.offset);
- break;
- }
- search_start = extent_end;
- /*
- * | ---- range to drop ----- |
- * | -------- extent -------- |
- */
- if (start > key.offset && end >= extent_end) {
- BUG_ON(del_nr > 0);
- if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
- ret = -EOPNOTSUPP;
- break;
- }
- btrfs_set_file_extent_num_bytes(leaf, fi,
- start - key.offset);
- btrfs_mark_buffer_dirty(leaf);
- if (update_refs && disk_bytenr > 0)
- inode_sub_bytes(inode, extent_end - start);
- if (end == extent_end)
- break;
- path->slots[0]++;
- goto next_slot;
- }
- /*
- * | ---- range to drop ----- |
- * | ------ extent ------ |
- */
- if (start <= key.offset && end >= extent_end) {
- delete_extent_item:
- if (del_nr == 0) {
- del_slot = path->slots[0];
- del_nr = 1;
- } else {
- BUG_ON(del_slot + del_nr != path->slots[0]);
- del_nr++;
- }
- if (update_refs &&
- extent_type == BTRFS_FILE_EXTENT_INLINE) {
- inode_sub_bytes(inode,
- extent_end - key.offset);
- extent_end = ALIGN(extent_end,
- root->sectorsize);
- } else if (update_refs && disk_bytenr > 0) {
- ret = btrfs_free_extent(trans, root,
- disk_bytenr, num_bytes, 0,
- root->root_key.objectid,
- key.objectid, key.offset -
- extent_offset);
- BUG_ON(ret); /* -ENOMEM */
- inode_sub_bytes(inode,
- extent_end - key.offset);
- }
- if (end == extent_end)
- break;
- if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
- path->slots[0]++;
- goto next_slot;
- }
- ret = btrfs_del_items(trans, root, path, del_slot,
- del_nr);
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- break;
- }
- del_nr = 0;
- del_slot = 0;
- btrfs_release_path(path);
- continue;
- }
- BUG_ON(1);
- }
- if (!ret && del_nr > 0) {
- /*
- * Set path->slots[0] to the first slot, so that after the delete,
- * if items are moved off from our leaf to its immediate left or
- * right neighbor leaves, we end up with a correct and adjusted
- * path->slots[0] for our insertion (if replace_extent != 0).
- */
- path->slots[0] = del_slot;
- ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
- if (ret)
- btrfs_abort_transaction(trans, ret);
- }
- leaf = path->nodes[0];
- /*
- * If btrfs_del_items() was called, it might have deleted a leaf, in
- * which case it unlocked our path, so check path->locks[0] matches a
- * write lock.
- */
- if (!ret && replace_extent && leafs_visited == 1 &&
- (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
- path->locks[0] == BTRFS_WRITE_LOCK) &&
- btrfs_leaf_free_space(root, leaf) >=
- sizeof(struct btrfs_item) + extent_item_size) {
- key.objectid = ino;
- key.type = BTRFS_EXTENT_DATA_KEY;
- key.offset = start;
- if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
- struct btrfs_key slot_key;
- btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
- if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
- path->slots[0]++;
- }
- setup_items_for_insert(root, path, &key,
- &extent_item_size,
- extent_item_size,
- sizeof(struct btrfs_item) +
- extent_item_size, 1);
- *key_inserted = 1;
- }
- if (!replace_extent || !(*key_inserted))
- btrfs_release_path(path);
- if (drop_end)
- *drop_end = found ? min(end, extent_end) : end;
- return ret;
- }
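- /*
- * Editor's note: btrfs_drop_extents() below is a convenience wrapper
- * that allocates its own path and calls __btrfs_drop_extents() without
- * the replace-extent optimization used by other callers.
- */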
- int btrfs_drop_extents(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode, u64 start,
- u64 end, int drop_cache)
- {
- struct btrfs_path *path;
- int ret;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
- ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
- drop_cache, 0, 0, NULL);
- btrfs_free_path(path);
- return ret;
- }
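- /*
- * Editor's note: extent_mergeable() below treats a neighbouring item
- * as mergeable only when it is a regular, uncompressed, unencrypted
- * extent pointing into the same disk extent (bytenr) at the logical
- * offset implied by orig_offset, i.e. the two items describe
- * contiguous pieces of one physical allocation.
- */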
- static int extent_mergeable(struct extent_buffer *leaf, int slot,
- u64 objectid, u64 bytenr, u64 orig_offset,
- u64 *start, u64 *end)
- {
- struct btrfs_file_extent_item *fi;
- struct btrfs_key key;
- u64 extent_end;
- if (slot < 0 || slot >= btrfs_header_nritems(leaf))
- return 0;
- btrfs_item_key_to_cpu(leaf, &key, slot);
- if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
- return 0;
- fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
- btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
- btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
- btrfs_file_extent_compression(leaf, fi) ||
- btrfs_file_extent_encryption(leaf, fi) ||
- btrfs_file_extent_other_encoding(leaf, fi))
- return 0;
- extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
- if ((*start && *start != key.offset) || (*end && *end != extent_end))
- return 0;
- *start = key.offset;
- *end = extent_end;
- return 1;
- }
- /*
- * Mark the extent in the range start - end as written.
- *
- * This changes the extent type from 'pre-allocated' to 'regular'. If
- * only part of the extent is marked as written, the extent will be
- * split into two or three pieces.
- */
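- /*
- * Editor's sketch: writing into the middle of a preallocated extent
- * splits it into three pieces, with only the middle one converted to
- * a regular extent:
- *
- *   | prealloc | written (REG) | prealloc |
- */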
- int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
- struct inode *inode, u64 start, u64 end)
- {
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct extent_buffer *leaf;
- struct btrfs_path *path;
- struct btrfs_file_extent_item *fi;
- struct btrfs_key key;
- struct btrfs_key new_key;
- u64 bytenr;
- u64 num_bytes;
- u64 extent_end;
- u64 orig_offset;
- u64 other_start;
- u64 other_end;
- u64 split;
- int del_nr = 0;
- int del_slot = 0;
- int recow;
- int ret;
- u64 ino = btrfs_ino(inode);
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
- again:
- recow = 0;
- split = start;
- key.objectid = ino;
- key.type = BTRFS_EXTENT_DATA_KEY;
- key.offset = split;
- ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret < 0)
- goto out;
- if (ret > 0 && path->slots[0] > 0)
- path->slots[0]--;
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- if (key.objectid != ino ||
- key.type != BTRFS_EXTENT_DATA_KEY) {
- ret = -EINVAL;
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
- ret = -EINVAL;
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
- extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
- if (key.offset > start || extent_end < end) {
- ret = -EINVAL;
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
- bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
- num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
- orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
- memcpy(&new_key, &key, sizeof(new_key));
- if (start == key.offset && end < extent_end) {
- other_start = 0;
- other_end = start;
- if (extent_mergeable(leaf, path->slots[0] - 1,
- ino, bytenr, orig_offset,
- &other_start, &other_end)) {
- new_key.offset = end;
- btrfs_set_item_key_safe(root->fs_info, path, &new_key);
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- btrfs_set_file_extent_generation(leaf, fi,
- trans->transid);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_end - end);
- btrfs_set_file_extent_offset(leaf, fi,
- end - orig_offset);
- fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
- struct btrfs_file_extent_item);
- btrfs_set_file_extent_generation(leaf, fi,
- trans->transid);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- end - other_start);
- btrfs_mark_buffer_dirty(leaf);
- goto out;
- }
- }
- if (start > key.offset && end == extent_end) {
- other_start = end;
- other_end = 0;
- if (extent_mergeable(leaf, path->slots[0] + 1,
- ino, bytenr, orig_offset,
- &other_start, &other_end)) {
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- start - key.offset);
- btrfs_set_file_extent_generation(leaf, fi,
- trans->transid);
- path->slots[0]++;
- new_key.offset = start;
- btrfs_set_item_key_safe(root->fs_info, path, &new_key);
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- btrfs_set_file_extent_generation(leaf, fi,
- trans->transid);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- other_end - start);
- btrfs_set_file_extent_offset(leaf, fi,
- start - orig_offset);
- btrfs_mark_buffer_dirty(leaf);
- goto out;
- }
- }
- while (start > key.offset || end < extent_end) {
- if (key.offset == start)
- split = end;
- new_key.offset = split;
- ret = btrfs_duplicate_item(trans, root, path, &new_key);
- if (ret == -EAGAIN) {
- btrfs_release_path(path);
- goto again;
- }
- if (ret < 0) {
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
- leaf = path->nodes[0];
- fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
- struct btrfs_file_extent_item);
- btrfs_set_file_extent_generation(leaf, fi, trans->transid);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- split - key.offset);
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- btrfs_set_file_extent_generation(leaf, fi, trans->transid);
- btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_end - split);
- btrfs_mark_buffer_dirty(leaf);
- ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
- root->root_key.objectid,
- ino, orig_offset);
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
- if (split == start) {
- key.offset = start;
- } else {
- if (start != key.offset) {
- ret = -EINVAL;
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
- path->slots[0]--;
- extent_end = end;
- }
- recow = 1;
- }
- other_start = end;
- other_end = 0;
- if (extent_mergeable(leaf, path->slots[0] + 1,
- ino, bytenr, orig_offset,
- &other_start, &other_end)) {
- if (recow) {
- btrfs_release_path(path);
- goto again;
- }
- extent_end = other_end;
- del_slot = path->slots[0] + 1;
- del_nr++;
- ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
- 0, root->root_key.objectid,
- ino, orig_offset);
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
- }
- other_start = 0;
- other_end = start;
- if (extent_mergeable(leaf, path->slots[0] - 1,
- ino, bytenr, orig_offset,
- &other_start, &other_end)) {
- if (recow) {
- btrfs_release_path(path);
- goto again;
- }
- key.offset = other_start;
- del_slot = path->slots[0];
- del_nr++;
- ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
- 0, root->root_key.objectid,
- ino, orig_offset);
- if (ret) {
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
- }
- if (del_nr == 0) {
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- btrfs_set_file_extent_type(leaf, fi,
- BTRFS_FILE_EXTENT_REG);
- btrfs_set_file_extent_generation(leaf, fi, trans->transid);
- btrfs_mark_buffer_dirty(leaf);
- } else {
- fi = btrfs_item_ptr(leaf, del_slot - 1,
- struct btrfs_file_extent_item);
- btrfs_set_file_extent_type(leaf, fi,
- BTRFS_FILE_EXTENT_REG);
- btrfs_set_file_extent_generation(leaf, fi, trans->transid);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_end - key.offset);
- btrfs_mark_buffer_dirty(leaf);
- ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
- if (ret < 0) {
- btrfs_abort_transaction(trans, ret);
- goto out;
- }
- }
- out:
- btrfs_free_path(path);
- return 0;
- }
- /*
- * On error we return an unlocked page and the error value;
- * on success we return a locked page and 0.
- */
- static int prepare_uptodate_page(struct inode *inode,
- struct page *page, u64 pos,
- bool force_uptodate)
- {
- int ret = 0;
- if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
- !PageUptodate(page)) {
- ret = btrfs_readpage(NULL, page);
- if (ret)
- return ret;
- lock_page(page);
- if (!PageUptodate(page)) {
- unlock_page(page);
- return -EIO;
- }
- if (page->mapping != inode->i_mapping) {
- unlock_page(page);
- return -EAGAIN;
- }
- }
- return 0;
- }
- /*
- * this just gets pages into the page cache and locks them down.
- */
- static noinline int prepare_pages(struct inode *inode, struct page **pages,
- size_t num_pages, loff_t pos,
- size_t write_bytes, bool force_uptodate)
- {
- int i;
- unsigned long index = pos >> PAGE_SHIFT;
- gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
- int err = 0;
- int faili;
- for (i = 0; i < num_pages; i++) {
- again:
- pages[i] = find_or_create_page(inode->i_mapping, index + i,
- mask | __GFP_WRITE);
- if (!pages[i]) {
- faili = i - 1;
- err = -ENOMEM;
- goto fail;
- }
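- /*
- * Editor's note: only the first and the last page may need to be
- * read in here; interior pages are completely overwritten by the
- * copy, so they do not need to be up to date beforehand.
- */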
- if (i == 0)
- err = prepare_uptodate_page(inode, pages[i], pos,
- force_uptodate);
- if (!err && i == num_pages - 1)
- err = prepare_uptodate_page(inode, pages[i],
- pos + write_bytes, false);
- if (err) {
- put_page(pages[i]);
- if (err == -EAGAIN) {
- err = 0;
- goto again;
- }
- faili = i - 1;
- goto fail;
- }
- wait_on_page_writeback(pages[i]);
- }
- return 0;
- fail:
- while (faili >= 0) {
- unlock_page(pages[faili]);
- put_page(pages[faili]);
- faili--;
- }
- return err;
- }
- /*
- * This function locks the extent and properly waits for data=ordered extents
- * to finish before allowing the pages to be modified if needed.
- *
- * The return value:
- * 1 - the extent is locked
- * 0 - the extent is not locked, and everything is OK
- * -EAGAIN - need to re-prepare the pages
- * any other value < 0 - something went wrong
- */
- static noinline int
- lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
- size_t num_pages, loff_t pos,
- size_t write_bytes,
- u64 *lockstart, u64 *lockend,
- struct extent_state **cached_state)
- {
- struct btrfs_root *root = BTRFS_I(inode)->root;
- u64 start_pos;
- u64 last_pos;
- int i;
- int ret = 0;
- start_pos = round_down(pos, root->sectorsize);
- last_pos = start_pos
- + round_up(pos + write_bytes - start_pos, root->sectorsize) - 1;
- if (start_pos < inode->i_size) {
- struct btrfs_ordered_extent *ordered;
- lock_extent_bits(&BTRFS_I(inode)->io_tree,
- start_pos, last_pos, cached_state);
- ordered = btrfs_lookup_ordered_range(inode, start_pos,
- last_pos - start_pos + 1);
- if (ordered &&
- ordered->file_offset + ordered->len > start_pos &&
- ordered->file_offset <= last_pos) {
- unlock_extent_cached(&BTRFS_I(inode)->io_tree,
- start_pos, last_pos,
- cached_state, GFP_NOFS);
- for (i = 0; i < num_pages; i++) {
- unlock_page(pages[i]);
- put_page(pages[i]);
- }
- btrfs_start_ordered_extent(inode, ordered, 1);
- btrfs_put_ordered_extent(ordered);
- return -EAGAIN;
- }
- if (ordered)
- btrfs_put_ordered_extent(ordered);
- clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
- last_pos, EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
- 0, 0, cached_state, GFP_NOFS);
- *lockstart = start_pos;
- *lockend = last_pos;
- ret = 1;
- }
- for (i = 0; i < num_pages; i++) {
- if (clear_page_dirty_for_io(pages[i]))
- account_page_redirty(pages[i]);
- set_page_extent_mapped(pages[i]);
- WARN_ON(!PageLocked(pages[i]));
- }
- return ret;
- }
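- /*
- * Editor's note: check_can_nocow() below returns > 0 when the write
- * can go ahead without COW, trimming *write_bytes to the range
- * confirmed by can_nocow_extent(); it returns 0 (and drops the
- * no-snapshoting guard) when the NOCOW write cannot proceed, and
- * -ENOSPC if the guard itself cannot be taken.
- */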
- static noinline int check_can_nocow(struct inode *inode, loff_t pos,
- size_t *write_bytes)
- {
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_ordered_extent *ordered;
- u64 lockstart, lockend;
- u64 num_bytes;
- int ret;
- ret = btrfs_start_write_no_snapshoting(root);
- if (!ret)
- return -ENOSPC;
- lockstart = round_down(pos, root->sectorsize);
- lockend = round_up(pos + *write_bytes, root->sectorsize) - 1;
- while (1) {
- lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
- ordered = btrfs_lookup_ordered_range(inode, lockstart,
- lockend - lockstart + 1);
- if (!ordered) {
- break;
- }
- unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
- btrfs_start_ordered_extent(inode, ordered, 1);
- btrfs_put_ordered_extent(ordered);
- }
- num_bytes = lockend - lockstart + 1;
- ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL);
- if (ret <= 0) {
- ret = 0;
- btrfs_end_write_no_snapshoting(root);
- } else {
- *write_bytes = min_t(size_t, *write_bytes,
- num_bytes - pos + lockstart);
- }
- unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
- return ret;
- }
- static noinline ssize_t __btrfs_buffered_write(struct file *file,
- struct iov_iter *i,
- loff_t pos)
- {
- struct inode *inode = file_inode(file);
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct page **pages = NULL;
- struct extent_state *cached_state = NULL;
- u64 release_bytes = 0;
- u64 lockstart;
- u64 lockend;
- size_t num_written = 0;
- int nrptrs;
- int ret = 0;
- bool only_release_metadata = false;
- bool force_page_uptodate = false;
- bool need_unlock;
- nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
- PAGE_SIZE / (sizeof(struct page *)));
- nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
- nrptrs = max(nrptrs, 8);
- pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
- while (iov_iter_count(i) > 0) {
- size_t offset = pos & (PAGE_SIZE - 1);
- size_t sector_offset;
- size_t write_bytes = min(iov_iter_count(i),
- nrptrs * (size_t)PAGE_SIZE -
- offset);
- size_t num_pages = DIV_ROUND_UP(write_bytes + offset,
- PAGE_SIZE);
- size_t reserve_bytes;
- size_t dirty_pages;
- size_t copied;
- size_t dirty_sectors;
- size_t num_sectors;
- WARN_ON(num_pages > nrptrs);
- /*
- * Fault pages before locking them in prepare_pages
- * to avoid a recursive lock.
- */
- if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
- ret = -EFAULT;
- break;
- }
- sector_offset = pos & (root->sectorsize - 1);
- reserve_bytes = round_up(write_bytes + sector_offset,
- root->sectorsize);
- ret = btrfs_check_data_free_space(inode, pos, write_bytes);
- if (ret < 0) {
- if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
- BTRFS_INODE_PREALLOC)) &&
- check_can_nocow(inode, pos, &write_bytes) > 0) {
- /*
- * For the NOCOW case there is no need to
- * reserve data space.
- */
- only_release_metadata = true;
- /*
- * our prealloc extent may be smaller than
- * write_bytes, so scale down.
- */
- num_pages = DIV_ROUND_UP(write_bytes + offset,
- PAGE_SIZE);
- reserve_bytes = round_up(write_bytes +
- sector_offset,
- root->sectorsize);
- } else {
- break;
- }
- }
- ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
- if (ret) {
- if (!only_release_metadata)
- btrfs_free_reserved_data_space(inode, pos,
- write_bytes);
- else
- btrfs_end_write_no_snapshoting(root);
- break;
- }
- release_bytes = reserve_bytes;
- need_unlock = false;
- again:
- /*
- * This is going to set up the pages array with the number of
- * pages we want, so we don't really need to worry about the
- * contents of pages from loop to loop.
- */
- ret = prepare_pages(inode, pages, num_pages,
- pos, write_bytes,
- force_page_uptodate);
- if (ret)
- break;
- ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
- pos, write_bytes, &lockstart,
- &lockend, &cached_state);
- if (ret < 0) {
- if (ret == -EAGAIN)
- goto again;
- break;
- } else if (ret > 0) {
- need_unlock = true;
- ret = 0;
- }
- copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
- num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
- reserve_bytes);
- dirty_sectors = round_up(copied + sector_offset,
- root->sectorsize);
- dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
- dirty_sectors);
- /*
- * if we have trouble faulting in the pages, fall
- * back to one page at a time
- */
- if (copied < write_bytes)
- nrptrs = 1;
- if (copied == 0) {
- force_page_uptodate = true;
- dirty_sectors = 0;
- dirty_pages = 0;
- } else {
- force_page_uptodate = false;
- dirty_pages = DIV_ROUND_UP(copied + offset,
- PAGE_SIZE);
- }
- /*
- * If we had a short copy we need to release the excess delalloc
- * bytes we reserved. We need to increment outstanding_extents
- * because btrfs_delalloc_release_space and
- * btrfs_delalloc_release_metadata will decrement it, but
- * we still have an outstanding extent for the chunk we actually
- * managed to copy.
- */
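- /*
- * Worked example (editor's addition, hypothetical values): if 8
- * sectors' worth of space was reserved but the copy only dirtied 3,
- * the 5 unused sectors' worth of reservation is released here while
- * the dirtied part stays reserved as delalloc.
- */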
- if (num_sectors > dirty_sectors) {
- /* release everything except the sectors we dirtied */
- release_bytes -= dirty_sectors <<
- root->fs_info->sb->s_blocksize_bits;
- if (copied > 0) {
- spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->outstanding_extents++;
- spin_unlock(&BTRFS_I(inode)->lock);
- }
- if (only_release_metadata) {
- btrfs_delalloc_release_metadata(inode,
- release_bytes);
- } else {
- u64 __pos;
- __pos = round_down(pos, root->sectorsize) +
- (dirty_pages << PAGE_SHIFT);
- btrfs_delalloc_release_space(inode, __pos,
- release_bytes);
- }
- }
- release_bytes = round_up(copied + sector_offset,
- root->sectorsize);
- if (copied > 0)
- ret = btrfs_dirty_pages(root, inode, pages,
- dirty_pages, pos, copied,
- NULL);
- if (need_unlock)
- unlock_extent_cached(&BTRFS_I(inode)->io_tree,
- lockstart, lockend, &cached_state,
- GFP_NOFS);
- if (ret) {
- btrfs_drop_pages(pages, num_pages);
- break;
- }
- release_bytes = 0;
- if (only_release_metadata)
- btrfs_end_write_no_snapshoting(root);
- if (only_release_metadata && copied > 0) {
- lockstart = round_down(pos, root->sectorsize);
- lockend = round_up(pos + copied, root->sectorsize) - 1;
- set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, EXTENT_NORESERVE, NULL,
- NULL, GFP_NOFS);
- only_release_metadata = false;
- }
- btrfs_drop_pages(pages, num_pages);
- cond_resched();
- balance_dirty_pages_ratelimited(inode->i_mapping);
- if (dirty_pages < (root->nodesize >> PAGE_SHIFT) + 1)
- btrfs_btree_balance_dirty(root);
- pos += copied;
- num_written += copied;
- }
- kfree(pages);
- if (release_bytes) {
- if (only_release_metadata) {
- btrfs_end_write_no_snapshoting(root);
- btrfs_delalloc_release_metadata(inode, release_bytes);
- } else {
- btrfs_delalloc_release_space(inode,
- round_down(pos, root->sectorsize),
- release_bytes);
- }
- }
- return num_written ? num_written : ret;
- }
- static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
- {
- struct file *file = iocb->ki_filp;
- struct inode *inode = file_inode(file);
- loff_t pos = iocb->ki_pos;
- ssize_t written;
- ssize_t written_buffered;
- loff_t endbyte;
- int err;
- written = generic_file_direct_write(iocb, from);
- if (written < 0 || !iov_iter_count(from))
- return written;
- pos += written;
- written_buffered = __btrfs_buffered_write(file, from, pos);
- if (written_buffered < 0) {
- err = written_buffered;
- goto out;
- }
- /*
- * Ensure all data is persisted. We want the next direct IO read to be
- * able to read what was just written.
- */
- endbyte = pos + written_buffered - 1;
- err = btrfs_fdatawrite_range(inode, pos, endbyte);
- if (err)
- goto out;
- err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
- if (err)
- goto out;
- written += written_buffered;
- iocb->ki_pos = pos + written_buffered;
- invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
- endbyte >> PAGE_SHIFT);
- out:
- return written ? written : err;
- }
- static void update_time_for_write(struct inode *inode)
- {
- struct timespec now;
- if (IS_NOCMTIME(inode))
- return;
- now = current_time(inode);
- if (!timespec_equal(&inode->i_mtime, &now))
- inode->i_mtime = now;
- if (!timespec_equal(&inode->i_ctime, &now))
- inode->i_ctime = now;
- if (IS_I_VERSION(inode))
- inode_inc_iversion(inode);
- }
- static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
- struct iov_iter *from)
- {
- struct file *file = iocb->ki_filp;
- struct inode *inode = file_inode(file);
- struct btrfs_root *root = BTRFS_I(inode)->root;
- u64 start_pos;
- u64 end_pos;
- ssize_t num_written = 0;
- bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
- ssize_t err;
- loff_t pos;
- size_t count;
- loff_t oldsize;
- int clean_page = 0;
- inode_lock(inode);
- err = generic_write_checks(iocb, from);
- if (err <= 0) {
- inode_unlock(inode);
- return err;
- }
- current->backing_dev_info = inode_to_bdi(inode);
- err = file_remove_privs(file);
- if (err) {
- inode_unlock(inode);
- goto out;
- }
- /*
- * If BTRFS flips readonly due to some impossible error
- * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
- * although we have opened a file as writable, we have
- * to stop this write operation to ensure FS consistency.
- */
- if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
- inode_unlock(inode);
- err = -EROFS;
- goto out;
- }
- /*
- * We reserve space for updating the inode when we reserve space for the
- * extent we are going to write, so any ENOSPC happens there. We don't
- * need to start yet another transaction to update the inode as we will
- * update the inode when we finish writing whatever data we write.
- */
- update_time_for_write(inode);
- pos = iocb->ki_pos;
- count = iov_iter_count(from);
- start_pos = round_down(pos, root->sectorsize);
- oldsize = i_size_read(inode);
- if (start_pos > oldsize) {
- /* Expand hole size to cover write data, preventing empty gap */
- end_pos = round_up(pos + count, root->sectorsize);
- err = btrfs_cont_expand(inode, oldsize, end_pos);
- if (err) {
- inode_unlock(inode);
- goto out;
- }
- if (start_pos > round_up(oldsize, root->sectorsize))
- clean_page = 1;
- }
- if (sync)
- atomic_inc(&BTRFS_I(inode)->sync_writers);
- if (iocb->ki_flags & IOCB_DIRECT) {
- num_written = __btrfs_direct_write(iocb, from);
- } else {
- num_written = __btrfs_buffered_write(file, from, pos);
- if (num_written > 0)
- iocb->ki_pos = pos + num_written;
- if (clean_page)
- pagecache_isize_extended(inode, oldsize,
- i_size_read(inode));
- }
- inode_unlock(inode);
- /*
- * We also have to set last_sub_trans to the current log transid,
- * otherwise subsequent syncs to a file that's been synced in this
- * transaction will appear to have already occurred.
- */
- spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->last_sub_trans = root->log_transid;
- spin_unlock(&BTRFS_I(inode)->lock);
- if (num_written > 0)
- num_written = generic_write_sync(iocb, num_written);
- if (sync)
- atomic_dec(&BTRFS_I(inode)->sync_writers);
- out:
- current->backing_dev_info = NULL;
- return num_written ? num_written : err;
- }
- int btrfs_release_file(struct inode *inode, struct file *filp)
- {
- if (filp->private_data)
- btrfs_ioctl_trans_end(filp);
- /*
- * ordered_data_close is set by setattr when we are about to truncate
- * a file from a non-zero size to a zero size. This tries to
- * flush down new bytes that may have been written if the
- * application was using truncate to replace a file in place.
- */
- if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
- &BTRFS_I(inode)->runtime_flags))
- filemap_flush(inode->i_mapping);
- return 0;
- }
- static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
- {
- int ret;
- struct blk_plug plug;
- /*
- * This is only called in fsync, which would do synchronous writes, so
- * a plug can merge adjacent IOs as much as possible. Esp. in case of
- * multiple disks using raid profile, a large IO can be split to
- * several segments of stripe length (currently 64K).
- */
- blk_start_plug(&plug);
- atomic_inc(&BTRFS_I(inode)->sync_writers);
- ret = btrfs_fdatawrite_range(inode, start, end);
- atomic_dec(&BTRFS_I(inode)->sync_writers);
- blk_finish_plug(&plug);
- return ret;
- }
- /*
- * fsync call for both files and directories. This logs the inode into
- * the tree log instead of forcing full commits whenever possible.
- *
- * It needs to call filemap_fdatawait so that all ordered extent updates
- * in the metadata btree are up to date for copying to the log.
- *
- * It drops the inode mutex before doing the tree log commit. This is an
- * important optimization for directories because holding the mutex prevents
- * new operations on the dir while we write to disk.
- */
- int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
- {
- struct dentry *dentry = file_dentry(file);
- struct inode *inode = d_inode(dentry);
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_trans_handle *trans;
- struct btrfs_log_ctx ctx;
- int ret = 0;
- bool full_sync = false;
- u64 len;
- /*
- * If the inode needs a full sync, make sure we use a full range to
- * avoid log tree corruption, due to hole detection racing with ordered
- * extent completion for adjacent ranges, and assertion failures during
- * hole detection.
- */
- if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags)) {
- start = 0;
- end = LLONG_MAX;
- }
- /*
- * The range length can be represented by a u64, but we have to do the
- * typecasts to avoid signed overflow if it's [0, LLONG_MAX], e.g. from
- * fsync().
- */
- len = (u64)end - (u64)start + 1;
- trace_btrfs_sync_file(file, datasync);
- /*
- * We write the dirty pages in the range and wait until they complete
- * outside of the ->i_mutex, so that multiple tasks can flush dirty
- * pages concurrently and improve performance. See
- * btrfs_wait_ordered_range for an explanation of the ASYNC check.
- */
- ret = start_ordered_ops(inode, start, end);
- if (ret)
- return ret;
- inode_lock(inode);
- atomic_inc(&root->log_batch);
- full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags);
- /*
- * We might have had more pages made dirty after calling
- * start_ordered_ops and before acquiring the inode's i_mutex.
- */
- if (full_sync) {
- /*
- * For a full sync, we need to make sure any ordered operations
- * start and finish before we start logging the inode, so that
- * all extents are persisted and the respective file extent
- * items are in the fs/subvol btree.
- */
- ret = btrfs_wait_ordered_range(inode, start, len);
- } else {
- /*
- * Start any new ordered operations before starting to log the
- * inode. We will wait for them to finish in btrfs_sync_log().
- *
- * Right before acquiring the inode's mutex, we might have new
- * writes dirtying pages, which won't immediately start the
- * respective ordered operations - that is done through the
- * fill_delalloc callbacks invoked from the writepage and
- * writepages address space operations. So make sure we start
- * all ordered operations before starting to log our inode. Not
- * doing this means that while logging the inode, writeback
- * could start and invoke writepage/writepages, which would call
- * the fill_delalloc callbacks (cow_file_range,
- * submit_compressed_extents). These callbacks add first an
- * extent map to the modified list of extents and then create
- * the respective ordered operation, which means in
- * tree-log.c:btrfs_log_inode() we might capture all existing
- * ordered operations (with btrfs_get_logged_extents()) before
- * the fill_delalloc callback adds its ordered operation, and by
- * the time we visit the modified list of extent maps (with
- * btrfs_log_changed_extents()), we see and process the extent
- * map they created. We then use the extent map to construct a
- * file extent item for logging without waiting for the
- * respective ordered operation to finish - this file extent
- * item points to a disk location that might not have yet been
- * written to, containing random data - so after a crash a log
- * replay will make our inode have file extent items that point
- * to disk locations containing invalid data, as we returned
- * success to userspace without waiting for the respective
- * ordered operation to finish, because it wasn't captured by
- * btrfs_get_logged_extents().
- */
- ret = start_ordered_ops(inode, start, end);
- }
- if (ret) {
- inode_unlock(inode);
- goto out;
- }
- atomic_inc(&root->log_batch);
- /*
- * If the last transaction that changed this file was before the current
- * transaction and we have the full sync flag set in our inode, we can
- * bail out now without any syncing.
- *
- * Note that we can't bail out if the full sync flag isn't set. This is
- * because when the full sync flag is set we start all ordered extents
- * and wait for them to fully complete - when they complete they update
- * the inode's last_trans field through:
- *
- * btrfs_finish_ordered_io() ->
- * btrfs_update_inode_fallback() ->
- * btrfs_update_inode() ->
- * btrfs_set_inode_last_trans()
- *
- * So we are sure that last_trans is up to date and can do this check to
- * bail out safely. For the fast path, when the full sync flag is not
- * set in our inode, we cannot do it because we only start our ordered
- * extents and don't wait for them to complete (that is when
- * btrfs_finish_ordered_io runs), so here at this point their last_trans
- * value might be less than or equal to fs_info->last_trans_committed,
- * and setting a speculative last_trans for an inode when a buffered
- * write is made (such as fs_info->generation + 1 for example) would not
- * be reliable since after setting the value and before fsync is called
- * any number of transactions can start and commit (transaction kthread
- * commits the current transaction periodically), and a transaction
- * commit does not start nor waits for ordered extents to complete.
- */
- smp_mb();
- if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
- (full_sync && BTRFS_I(inode)->last_trans <=
- root->fs_info->last_trans_committed) ||
- (!btrfs_have_ordered_extents_in_range(inode, start, len) &&
- BTRFS_I(inode)->last_trans
- <= root->fs_info->last_trans_committed)) {
- /*
- * We've had everything committed since the last time we were
- * modified so clear this flag in case it was set for whatever
- * reason, it's no longer relevant.
- */
- clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags);
- /*
- * An ordered extent might have started before and completed
- * already with io errors, in which case the inode was not
- * updated and we end up here. So check the inode's mapping
- * flags for any errors that might have happened while doing
- * writeback of file data.
- */
- ret = filemap_check_errors(inode->i_mapping);
- inode_unlock(inode);
- goto out;
- }
- /*
- * OK, we haven't committed the transaction yet, let's do a commit.
- */
- if (file->private_data)
- btrfs_ioctl_trans_end(file);
- /*
- * We use start here because we will need to wait on the IO to complete
- * in btrfs_sync_log, which could require joining a transaction (for
- * example checking cross references in the nocow path). If we use join
- * here we could get into a situation where we're waiting on IO to
- * happen that is blocked on a transaction trying to commit. With start
- * we inc the extwriter counter, so we wait for all extwriters to exit
- * before we start blocking join'ers. This comment is to keep somebody
- * from thinking they are super smart and changing this to
- * btrfs_join_transaction *cough*Josef*cough*.
- */
- trans = btrfs_start_transaction(root, 0);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- inode_unlock(inode);
- goto out;
- }
- trans->sync = true;
- btrfs_init_log_ctx(&ctx, inode);
- ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
- if (ret < 0) {
- /* Fallthrough and commit/free transaction. */
- ret = 1;
- }
- /*
- * We've logged all the items and now have a consistent
- * version of the file in the log. It is possible that
- * someone will come in and modify the file, but that's
- * fine because the log is consistent on disk, and we
- * have references to all of the file's extents
- *
- * It is possible that someone will come in and log the
- * file again, but that will end up using the synchronization
- * inside btrfs_sync_log to keep things safe.
- */
- inode_unlock(inode);
- /*
- * If any of the ordered extents had an error, just return it to user
- * space, so that the application knows some writes didn't succeed and
- * can take proper action (e.g. retry). Blindly committing the
- * transaction in this case, would fool userspace that everything was
- * successful. And we also want to make sure our log doesn't contain
- * file extent items pointing to extents that weren't fully written to -
- * just like in the non fast fsync path, where we check for the ordered
- * operation's error flag before writing to the log tree and return -EIO
- * if any of them had this flag set (btrfs_wait_ordered_range) -
- * therefore we need to check for errors in the ordered operations,
- * which are indicated by ctx.io_err.
- */
- if (ctx.io_err) {
- btrfs_end_transaction(trans, root);
- ret = ctx.io_err;
- goto out;
- }
- if (ret != BTRFS_NO_LOG_SYNC) {
- if (!ret) {
- ret = btrfs_sync_log(trans, root, &ctx);
- if (!ret) {
- ret = btrfs_end_transaction(trans, root);
- goto out;
- }
- }
- if (!full_sync) {
- ret = btrfs_wait_ordered_range(inode, start, len);
- if (ret) {
- btrfs_end_transaction(trans, root);
- goto out;
- }
- }
- ret = btrfs_commit_transaction(trans, root);
- } else {
- ret = btrfs_end_transaction(trans, root);
- }
- out:
- return ret > 0 ? -EIO : ret;
- }
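- /*
- * Usage sketch (not from this file; function name is illustrative): the
- * fast fsync path above is driven from userspace by plain
- * fsync(2)/fdatasync(2); datasync == 1 corresponds to fdatasync().
- * Error handling trimmed.
- */
- #if 0 /* userspace example, not kernel code */
- #include <sys/types.h>
- #include <unistd.h>
- int append_and_sync(int fd, const void *buf, size_t len)
- {
-         if (write(fd, buf, len) != (ssize_t)len)
-                 return -1;
-         /* data-only sync; use fsync(fd) to also persist timestamps */
-         return fdatasync(fd);
- }
- #endif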
- static const struct vm_operations_struct btrfs_file_vm_ops = {
- .fault = filemap_fault,
- .map_pages = filemap_map_pages,
- .page_mkwrite = btrfs_page_mkwrite,
- };
- static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
- {
- struct address_space *mapping = filp->f_mapping;
- if (!mapping->a_ops->readpage)
- return -ENOEXEC;
- file_accessed(filp);
- vma->vm_ops = &btrfs_file_vm_ops;
- return 0;
- }
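- /*
- * Usage sketch (not from this file; names are illustrative): a MAP_SHARED
- * writable mapping set up through the hook above; the first store into a
- * clean page faults through btrfs_page_mkwrite() to reserve space.
- * Error handling trimmed.
- */
- #if 0 /* userspace example, not kernel code */
- #include <stddef.h>
- #include <sys/mman.h>
- int touch_mapped(int fd, size_t len)
- {
-         char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-         if (p == MAP_FAILED)
-                 return -1;
-         p[0] = 1; /* dirties the first page via btrfs_page_mkwrite() */
-         return munmap(p, len);
- }
- #endif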
- static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
- int slot, u64 start, u64 end)
- {
- struct btrfs_file_extent_item *fi;
- struct btrfs_key key;
- if (slot < 0 || slot >= btrfs_header_nritems(leaf))
- return 0;
- btrfs_item_key_to_cpu(leaf, &key, slot);
- if (key.objectid != btrfs_ino(inode) ||
- key.type != BTRFS_EXTENT_DATA_KEY)
- return 0;
- fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
- if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
- return 0;
- if (btrfs_file_extent_disk_bytenr(leaf, fi))
- return 0;
- if (key.offset == end)
- return 1;
- if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
- return 1;
- return 0;
- }
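- /*
- * Example: given an existing hole item at key.offset == 8192 with
- * num_bytes == 4096, hole_mergeable() returns 1 for a new hole ending at
- * 8192 (key.offset == end) or starting at 12288 (key.offset + num_bytes
- * == start), letting fill_holes() below extend the existing item instead
- * of inserting a new one.
- */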
- static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
- struct btrfs_path *path, u64 offset, u64 end)
- {
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct extent_buffer *leaf;
- struct btrfs_file_extent_item *fi;
- struct extent_map *hole_em;
- struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
- struct btrfs_key key;
- int ret;
- if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
- goto out;
- key.objectid = btrfs_ino(inode);
- key.type = BTRFS_EXTENT_DATA_KEY;
- key.offset = offset;
- ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
- if (ret < 0)
- return ret;
- BUG_ON(!ret);
- leaf = path->nodes[0];
- if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
- u64 num_bytes;
- path->slots[0]--;
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
- end - offset;
- btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
- btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
- btrfs_set_file_extent_offset(leaf, fi, 0);
- btrfs_mark_buffer_dirty(leaf);
- goto out;
- }
- if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
- u64 num_bytes;
- key.offset = offset;
- btrfs_set_item_key_safe(root->fs_info, path, &key);
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
- num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
- offset;
- btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
- btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
- btrfs_set_file_extent_offset(leaf, fi, 0);
- btrfs_mark_buffer_dirty(leaf);
- goto out;
- }
- btrfs_release_path(path);
- ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
- 0, 0, end - offset, 0, end - offset,
- 0, 0, 0);
- if (ret)
- return ret;
- out:
- btrfs_release_path(path);
- hole_em = alloc_extent_map();
- if (!hole_em) {
- btrfs_drop_extent_cache(inode, offset, end - 1, 0);
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags);
- } else {
- hole_em->start = offset;
- hole_em->len = end - offset;
- hole_em->ram_bytes = hole_em->len;
- hole_em->orig_start = offset;
- hole_em->block_start = EXTENT_MAP_HOLE;
- hole_em->block_len = 0;
- hole_em->orig_block_len = 0;
- hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
- hole_em->compress_type = BTRFS_COMPRESS_NONE;
- hole_em->generation = trans->transid;
- do {
- btrfs_drop_extent_cache(inode, offset, end - 1, 0);
- write_lock(&em_tree->lock);
- ret = add_extent_mapping(em_tree, hole_em, 1);
- write_unlock(&em_tree->lock);
- } while (ret == -EEXIST);
- free_extent_map(hole_em);
- if (ret)
- set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
- &BTRFS_I(inode)->runtime_flags);
- }
- return 0;
- }
- /*
- * Find a hole extent on the given inode and change start/len to point past
- * it (a hole/vacuum extent is one whose em->start <= start &&
- * em->start + em->len > start).
- * When a hole extent is found, return 1 and modify start/len.
- */
- static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
- {
- struct extent_map *em;
- int ret = 0;
- em = btrfs_get_extent(inode, NULL, 0, *start, *len, 0);
- if (IS_ERR_OR_NULL(em)) {
- if (!em)
- ret = -ENOMEM;
- else
- ret = PTR_ERR(em);
- return ret;
- }
- /* Hole or vacuum extent (the latter only exists in no-holes mode) */
- if (em->block_start == EXTENT_MAP_HOLE) {
- ret = 1;
- *len = em->start + em->len > *start + *len ?
- 0 : *start + *len - em->start - em->len;
- *start = em->start + em->len;
- }
- free_extent_map(em);
- return ret;
- }
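- /*
- * Example (4K sectorsize): with *start == 4096, *len == 12288 and a hole
- * extent map covering [0, 8192), we return 1 with *start == 8192 and
- * *len == 8192; if the hole covered [0, 16384) or beyond, *len would
- * become 0 and the caller can skip the range entirely.
- */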
- static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
- {
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct extent_state *cached_state = NULL;
- struct btrfs_path *path;
- struct btrfs_block_rsv *rsv;
- struct btrfs_trans_handle *trans;
- u64 lockstart;
- u64 lockend;
- u64 tail_start;
- u64 tail_len;
- u64 orig_start = offset;
- u64 cur_offset;
- u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
- u64 drop_end;
- int ret = 0;
- int err = 0;
- unsigned int rsv_count;
- bool same_block;
- bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
- u64 ino_size;
- bool truncated_block = false;
- bool updated_inode = false;
- ret = btrfs_wait_ordered_range(inode, offset, len);
- if (ret)
- return ret;
- inode_lock(inode);
- ino_size = round_up(inode->i_size, root->sectorsize);
- ret = find_first_non_hole(inode, &offset, &len);
- if (ret < 0)
- goto out_only_mutex;
- if (ret && !len) {
- /* Already in a large hole */
- ret = 0;
- goto out_only_mutex;
- }
- lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
- lockend = round_down(offset + len,
- BTRFS_I(inode)->root->sectorsize) - 1;
- same_block = (BTRFS_BYTES_TO_BLKS(root->fs_info, offset))
- == (BTRFS_BYTES_TO_BLKS(root->fs_info, offset + len - 1));
- /*
- * We needn't truncate any block which is beyond the end of the file
- * because we are sure there is no data there.
- *
- * Only do this if we are in the same block and we aren't doing the
- * entire block.
- */
- if (same_block && len < root->sectorsize) {
- if (offset < ino_size) {
- truncated_block = true;
- ret = btrfs_truncate_block(inode, offset, len, 0);
- } else {
- ret = 0;
- }
- goto out_only_mutex;
- }
- /* zero back part of the first block */
- if (offset < ino_size) {
- truncated_block = true;
- ret = btrfs_truncate_block(inode, offset, 0, 0);
- if (ret) {
- inode_unlock(inode);
- return ret;
- }
- }
- /*
- * Check the aligned pages after the first unaligned page. If
- * offset != orig_start, the first unaligned page and several
- * following pages are already in holes, so the extra check can
- * be skipped.
- */
- if (offset == orig_start) {
- /* after truncating the first block, check for holes again */
- len = offset + len - lockstart;
- offset = lockstart;
- ret = find_first_non_hole(inode, &offset, &len);
- if (ret < 0)
- goto out_only_mutex;
- if (ret && !len) {
- ret = 0;
- goto out_only_mutex;
- }
- lockstart = offset;
- }
- /* Check the tail unaligned part is in a hole */
- tail_start = lockend + 1;
- tail_len = offset + len - tail_start;
- if (tail_len) {
- ret = find_first_non_hole(inode, &tail_start, &tail_len);
- if (unlikely(ret < 0))
- goto out_only_mutex;
- if (!ret) {
- /* zero the front end of the last page */
- if (tail_start + tail_len < ino_size) {
- truncated_block = true;
- ret = btrfs_truncate_block(inode,
- tail_start + tail_len,
- 0, 1);
- if (ret)
- goto out_only_mutex;
- }
- }
- }
- if (lockend < lockstart) {
- ret = 0;
- goto out_only_mutex;
- }
- while (1) {
- struct btrfs_ordered_extent *ordered;
- truncate_pagecache_range(inode, lockstart, lockend);
- lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state);
- ordered = btrfs_lookup_first_ordered_extent(inode, lockend);
- /*
- * We need to make sure we have no ordered extents in this range
- * and nobody raced in and read a page in this range, if we did
- * we need to try again.
- */
- if ((!ordered ||
- (ordered->file_offset + ordered->len <= lockstart ||
- ordered->file_offset > lockend)) &&
- !btrfs_page_exists_in_range(inode, lockstart, lockend)) {
- if (ordered)
- btrfs_put_ordered_extent(ordered);
- break;
- }
- if (ordered)
- btrfs_put_ordered_extent(ordered);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, &cached_state, GFP_NOFS);
- ret = btrfs_wait_ordered_range(inode, lockstart,
- lockend - lockstart + 1);
- if (ret) {
- inode_unlock(inode);
- return ret;
- }
- }
- path = btrfs_alloc_path();
- if (!path) {
- ret = -ENOMEM;
- goto out;
- }
- rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
- if (!rsv) {
- ret = -ENOMEM;
- goto out_free;
- }
- rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
- rsv->failfast = 1;
- /*
- * 1 - update the inode
- * 1 - removing the extents in the range
- * 1 - adding the hole extent if no_holes isn't set
- */
- rsv_count = no_holes ? 2 : 3;
- trans = btrfs_start_transaction(root, rsv_count);
- if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
- goto out_free;
- }
- ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
- min_size, 0);
- BUG_ON(ret);
- trans->block_rsv = rsv;
- cur_offset = lockstart;
- len = lockend - cur_offset;
- while (cur_offset < lockend) {
- ret = __btrfs_drop_extents(trans, root, inode, path,
- cur_offset, lockend + 1,
- &drop_end, 1, 0, 0, NULL);
- if (ret != -ENOSPC)
- break;
- trans->block_rsv = &root->fs_info->trans_block_rsv;
- if (cur_offset < ino_size) {
- ret = fill_holes(trans, inode, path, cur_offset,
- drop_end);
- if (ret) {
- err = ret;
- break;
- }
- }
- cur_offset = drop_end;
- ret = btrfs_update_inode(trans, root, inode);
- if (ret) {
- err = ret;
- break;
- }
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
- trans = btrfs_start_transaction(root, rsv_count);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- trans = NULL;
- break;
- }
- ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
- rsv, min_size, 0);
- BUG_ON(ret); /* shouldn't happen */
- trans->block_rsv = rsv;
- ret = find_first_non_hole(inode, &cur_offset, &len);
- if (unlikely(ret < 0))
- break;
- if (ret && !len) {
- ret = 0;
- break;
- }
- }
- if (ret) {
- err = ret;
- goto out_trans;
- }
- trans->block_rsv = &root->fs_info->trans_block_rsv;
- /*
- * If we are using the NO_HOLES feature we might have already had a
- * hole that overlaps a part of the region [lockstart, lockend] and
- * ends at (or beyond) lockend. Since we have no file extent items to
- * represent holes, drop_end can be less than lockend and so we must
- * make sure we have an extent map representing the existing hole (the
- * call to __btrfs_drop_extents() might have dropped the existing extent
- * map representing the existing hole), otherwise the fast fsync path
- * will not record the existence of the hole region
- * [existing_hole_start, lockend].
- */
- if (drop_end <= lockend)
- drop_end = lockend + 1;
- /*
- * Don't insert file hole extent item if it's for a range beyond eof
- * (because it's useless) or if it represents a 0 bytes range (when
- * cur_offset == drop_end).
- */
- if (cur_offset < ino_size && cur_offset < drop_end) {
- ret = fill_holes(trans, inode, path, cur_offset, drop_end);
- if (ret) {
- err = ret;
- goto out_trans;
- }
- }
- out_trans:
- if (!trans)
- goto out_free;
- inode_inc_iversion(inode);
- inode->i_mtime = inode->i_ctime = current_time(inode);
- trans->block_rsv = &root->fs_info->trans_block_rsv;
- ret = btrfs_update_inode(trans, root, inode);
- updated_inode = true;
- btrfs_end_transaction(trans, root);
- btrfs_btree_balance_dirty(root);
- out_free:
- btrfs_free_path(path);
- btrfs_free_block_rsv(root, rsv);
- out:
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state, GFP_NOFS);
- out_only_mutex:
- if (!updated_inode && truncated_block && !ret && !err) {
- /*
- * If we only end up zeroing part of a page, we still need to
- * update the inode item, so that all the time fields are
- * updated as well as the necessary btrfs inode in memory fields
- * for detecting, at fsync time, if the inode isn't yet in the
- * log tree or it's there but not up to date.
- */
- struct timespec now = current_time(inode);
- inode_inc_iversion(inode);
- inode->i_mtime = now;
- inode->i_ctime = now;
- trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
- } else {
- err = btrfs_update_inode(trans, root, inode);
- ret = btrfs_end_transaction(trans, root);
- }
- }
- inode_unlock(inode);
- if (ret && !err)
- err = ret;
- return err;
- }
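- /*
- * Usage sketch (not from this file; function name is illustrative):
- * userspace reaches btrfs_punch_hole() via fallocate(2);
- * FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE.
- * Error handling trimmed.
- */
- #if 0 /* userspace example, not kernel code */
- #define _GNU_SOURCE
- #include <fcntl.h>
- int punch(int fd, off_t off, off_t len)
- {
-         /* deallocate [off, off + len); the hole reads back as zeroes */
-         return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
-                          off, len);
- }
- #endif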
- /* Helper structure to record which range is already reserved */
- struct falloc_range {
- struct list_head list;
- u64 start;
- u64 len;
- };
- /*
- * Helper function to add falloc range
- *
- * Caller should have locked the larger range of extent containing
- * [start, len)
- */
- static int add_falloc_range(struct list_head *head, u64 start, u64 len)
- {
- struct falloc_range *prev = NULL;
- struct falloc_range *range = NULL;
- if (list_empty(head))
- goto insert;
- /*
- * As fallocate iterates in bytenr order, we only need to check
- * the last range.
- */
- prev = list_entry(head->prev, struct falloc_range, list);
- if (prev->start + prev->len == start) {
- prev->len += len;
- return 0;
- }
- insert:
- range = kmalloc(sizeof(*range), GFP_KERNEL);
- if (!range)
- return -ENOMEM;
- range->start = start;
- range->len = len;
- list_add_tail(&range->list, head);
- return 0;
- }
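- /*
- * Example: fallocate walks the file in increasing offset order, so after
- * adding the range [0, 4096) a second call with start == 4096 extends the
- * tail entry to [0, 8192) instead of allocating a new node, while a
- * non-contiguous range starting at, say, 16384 gets its own entry.
- */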
- static long btrfs_fallocate(struct file *file, int mode,
- loff_t offset, loff_t len)
- {
- struct inode *inode = file_inode(file);
- struct extent_state *cached_state = NULL;
- struct falloc_range *range;
- struct falloc_range *tmp;
- struct list_head reserve_list;
- u64 cur_offset;
- u64 last_byte;
- u64 alloc_start;
- u64 alloc_end;
- u64 alloc_hint = 0;
- u64 locked_end;
- u64 actual_end = 0;
- struct extent_map *em;
- int blocksize = BTRFS_I(inode)->root->sectorsize;
- int ret;
- alloc_start = round_down(offset, blocksize);
- alloc_end = round_up(offset + len, blocksize);
- cur_offset = alloc_start;
- /* Make sure we aren't being given some crap mode */
- if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
- return -EOPNOTSUPP;
- if (mode & FALLOC_FL_PUNCH_HOLE)
- return btrfs_punch_hole(inode, offset, len);
- /*
- * Only trigger disk allocation, don't trigger qgroup reserve
- *
- * For qgroup space, it will be checked later.
- */
- ret = btrfs_alloc_data_chunk_ondemand(inode, alloc_end - alloc_start);
- if (ret < 0)
- return ret;
- inode_lock(inode);
- if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
- ret = inode_newsize_ok(inode, offset + len);
- if (ret)
- goto out;
- }
- /*
- * TODO: Move these two operations after we have checked
- * accurate reserved space, or fallocate can still fail but
- * with page truncated or size expanded.
- *
- * But that's a minor problem and won't do much harm BTW.
- */
- if (alloc_start > inode->i_size) {
- ret = btrfs_cont_expand(inode, i_size_read(inode),
- alloc_start);
- if (ret)
- goto out;
- } else if (offset + len > inode->i_size) {
- /*
- * If we are fallocating from the end of the file onward we
- * need to zero out the end of the block if i_size lands in the
- * middle of a block.
- */
- ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
- if (ret)
- goto out;
- }
- /*
- * wait for ordered IO before we have any locks. We'll loop again
- * below with the locks held.
- */
- ret = btrfs_wait_ordered_range(inode, alloc_start,
- alloc_end - alloc_start);
- if (ret)
- goto out;
- locked_end = alloc_end - 1;
- while (1) {
- struct btrfs_ordered_extent *ordered;
- /* the extent lock is ordered inside the running
- * transaction
- */
- lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
- locked_end, &cached_state);
- ordered = btrfs_lookup_first_ordered_extent(inode,
- alloc_end - 1);
- if (ordered &&
- ordered->file_offset + ordered->len > alloc_start &&
- ordered->file_offset < alloc_end) {
- btrfs_put_ordered_extent(ordered);
- unlock_extent_cached(&BTRFS_I(inode)->io_tree,
- alloc_start, locked_end,
- &cached_state, GFP_KERNEL);
- /*
- * we can't wait on the range with the transaction
- * running or with the extent lock held
- */
- ret = btrfs_wait_ordered_range(inode, alloc_start,
- alloc_end - alloc_start);
- if (ret)
- goto out;
- } else {
- if (ordered)
- btrfs_put_ordered_extent(ordered);
- break;
- }
- }
- /* First, check if we exceed the qgroup limit */
- INIT_LIST_HEAD(&reserve_list);
- while (1) {
- em = btrfs_get_extent(inode, NULL, 0, cur_offset,
- alloc_end - cur_offset, 0);
- if (IS_ERR_OR_NULL(em)) {
- if (!em)
- ret = -ENOMEM;
- else
- ret = PTR_ERR(em);
- break;
- }
- last_byte = min(extent_map_end(em), alloc_end);
- actual_end = min_t(u64, extent_map_end(em), offset + len);
- last_byte = ALIGN(last_byte, blocksize);
- if (em->block_start == EXTENT_MAP_HOLE ||
- (cur_offset >= inode->i_size &&
- !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
- ret = add_falloc_range(&reserve_list, cur_offset,
- last_byte - cur_offset);
- if (ret < 0) {
- free_extent_map(em);
- break;
- }
- ret = btrfs_qgroup_reserve_data(inode, cur_offset,
- last_byte - cur_offset);
- if (ret < 0) {
- free_extent_map(em);
- break;
- }
- } else {
- /*
- * Do not need to reserve unwritten extent for this
- * range, free reserved data space first, otherwise
- * it'll result in false ENOSPC error.
- */
- btrfs_free_reserved_data_space(inode, cur_offset,
- last_byte - cur_offset);
- }
- free_extent_map(em);
- cur_offset = last_byte;
- if (cur_offset >= alloc_end)
- break;
- }
- /*
- * If ret is still 0, it means we're OK to fallocate.
- * Otherwise just clean up the list and exit.
- */
- list_for_each_entry_safe(range, tmp, &reserve_list, list) {
- if (!ret)
- ret = btrfs_prealloc_file_range(inode, mode,
- range->start,
- range->len, i_blocksize(inode),
- offset + len, &alloc_hint);
- else
- btrfs_free_reserved_data_space(inode, range->start,
- range->len);
- list_del(&range->list);
- kfree(range);
- }
- if (ret < 0)
- goto out_unlock;
- if (actual_end > inode->i_size &&
- !(mode & FALLOC_FL_KEEP_SIZE)) {
- struct btrfs_trans_handle *trans;
- struct btrfs_root *root = BTRFS_I(inode)->root;
- /*
- * We didn't need to allocate any more space, but we
- * still extended the size of the file so we need to
- * update i_size and the inode item.
- */
- trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans)) {
- ret = PTR_ERR(trans);
- } else {
- inode->i_ctime = current_time(inode);
- i_size_write(inode, actual_end);
- btrfs_ordered_update_i_size(inode, actual_end, NULL);
- ret = btrfs_update_inode(trans, root, inode);
- if (ret)
- btrfs_end_transaction(trans, root);
- else
- ret = btrfs_end_transaction(trans, root);
- }
- }
- out_unlock:
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
- &cached_state, GFP_KERNEL);
- out:
- inode_unlock(inode);
- /* Let go of our reservation. */
- if (ret != 0)
- btrfs_free_reserved_data_space(inode, alloc_start,
- alloc_end - cur_offset);
- return ret;
- }
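- /*
- * Usage sketch (not from this file; function name is illustrative): plain
- * preallocation through the function above. With mode == 0 the file grows
- * to offset + len; with FALLOC_FL_KEEP_SIZE blocks are reserved as
- * unwritten extents without changing i_size. Error handling trimmed.
- */
- #if 0 /* userspace example, not kernel code */
- #define _GNU_SOURCE
- #include <fcntl.h>
- int prealloc(int fd, off_t off, off_t len)
- {
-         /* reserve [off, off + len) without moving i_size */
-         return fallocate(fd, FALLOC_FL_KEEP_SIZE, off, len);
- }
- #endif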
- static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
- {
- struct btrfs_root *root = BTRFS_I(inode)->root;
- struct extent_map *em = NULL;
- struct extent_state *cached_state = NULL;
- u64 lockstart;
- u64 lockend;
- u64 start;
- u64 len;
- int ret = 0;
- if (inode->i_size == 0)
- return -ENXIO;
- /*
- * *offset can be negative; in this case we start finding DATA/HOLE from
- * the very start of the file.
- */
- start = max_t(loff_t, 0, *offset);
- lockstart = round_down(start, root->sectorsize);
- lockend = round_up(i_size_read(inode), root->sectorsize);
- if (lockend <= lockstart)
- lockend = lockstart + root->sectorsize;
- lockend--;
- len = lockend - lockstart + 1;
- lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state);
- while (start < inode->i_size) {
- em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
- if (IS_ERR(em)) {
- ret = PTR_ERR(em);
- em = NULL;
- break;
- }
- if (whence == SEEK_HOLE &&
- (em->block_start == EXTENT_MAP_HOLE ||
- test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
- break;
- else if (whence == SEEK_DATA &&
- (em->block_start != EXTENT_MAP_HOLE &&
- !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
- break;
- start = em->start + em->len;
- free_extent_map(em);
- em = NULL;
- cond_resched();
- }
- free_extent_map(em);
- if (!ret) {
- if (whence == SEEK_DATA && start >= inode->i_size)
- ret = -ENXIO;
- else
- *offset = min_t(loff_t, start, inode->i_size);
- }
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
- &cached_state, GFP_NOFS);
- return ret;
- }
- static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
- {
- struct inode *inode = file->f_mapping->host;
- int ret;
- inode_lock(inode);
- switch (whence) {
- case SEEK_END:
- case SEEK_CUR:
- offset = generic_file_llseek(file, offset, whence);
- goto out;
- case SEEK_DATA:
- case SEEK_HOLE:
- if (offset >= i_size_read(inode)) {
- inode_unlock(inode);
- return -ENXIO;
- }
- ret = find_desired_extent(inode, &offset, whence);
- if (ret) {
- inode_unlock(inode);
- return ret;
- }
- }
- offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
- out:
- inode_unlock(inode);
- return offset;
- }
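- /*
- * Usage sketch (not from this file; function name is illustrative):
- * enumerating data ranges with the SEEK_DATA/SEEK_HOLE support above;
- * lseek(2) fails with ENXIO once no further data exists. Error handling
- * trimmed.
- */
- #if 0 /* userspace example, not kernel code */
- #define _GNU_SOURCE
- #include <stdio.h>
- #include <unistd.h>
- void dump_data_ranges(int fd)
- {
-         off_t data = 0, hole;
-         while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
-                 hole = lseek(fd, data, SEEK_HOLE);
-                 if (hole < 0)
-                         break;
-                 printf("data: [%lld, %lld)\n", (long long)data,
-                        (long long)hole);
-                 data = hole;
-         }
- }
- #endif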
- const struct file_operations btrfs_file_operations = {
- .llseek = btrfs_file_llseek,
- .read_iter = generic_file_read_iter,
- .splice_read = generic_file_splice_read,
- .write_iter = btrfs_file_write_iter,
- .mmap = btrfs_file_mmap,
- .open = generic_file_open,
- .release = btrfs_release_file,
- .fsync = btrfs_sync_file,
- .fallocate = btrfs_fallocate,
- .unlocked_ioctl = btrfs_ioctl,
- #ifdef CONFIG_COMPAT
- .compat_ioctl = btrfs_compat_ioctl,
- #endif
- .copy_file_range = btrfs_copy_file_range,
- .clone_file_range = btrfs_clone_file_range,
- .dedupe_file_range = btrfs_dedupe_file_range,
- };
- void btrfs_auto_defrag_exit(void)
- {
- kmem_cache_destroy(btrfs_inode_defrag_cachep);
- }
- int btrfs_auto_defrag_init(void)
- {
- btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
- sizeof(struct inode_defrag), 0,
- SLAB_MEM_SPREAD,
- NULL);
- if (!btrfs_inode_defrag_cachep)
- return -ENOMEM;
- return 0;
- }
- int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
- {
- int ret;
- /*
- * So with compression we will find and lock a dirty page and clear the
- * first one as dirty, set up an async extent, and immediately return
- * with the entire range locked but with nobody actually marked with
- * writeback. So we can't just filemap_write_and_wait_range() and
- * expect it to work since it will just kick off a thread to do the
- * actual work. So we need to call filemap_fdatawrite_range _again_
- * since it will wait on the page lock, which won't be unlocked until
- * after the pages have been marked as writeback and so we're good to go
- * from there. We have to do this otherwise we'll miss the ordered
- * extents and that results in badness. Please Josef, do not think you
- * know better and pull this out at some point in the future, it is
- * right and you are wrong.
- */
- ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
- if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
- &BTRFS_I(inode)->runtime_flags))
- ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
- return ret;
- }