xattr.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046
  1. /*
  2. * Copyright (C) International Business Machines Corp., 2000-2004
  3. * Copyright (C) Christoph Hellwig, 2002
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; either version 2 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
  13. * the GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. #include <linux/capability.h>
  20. #include <linux/fs.h>
  21. #include <linux/xattr.h>
  22. #include <linux/posix_acl_xattr.h>
  23. #include <linux/slab.h>
  24. #include <linux/quotaops.h>
  25. #include <linux/security.h>
  26. #include "jfs_incore.h"
  27. #include "jfs_superblock.h"
  28. #include "jfs_dmap.h"
  29. #include "jfs_debug.h"
  30. #include "jfs_dinode.h"
  31. #include "jfs_extent.h"
  32. #include "jfs_metapage.h"
  33. #include "jfs_xattr.h"
  34. #include "jfs_acl.h"
  35. /*
  36. * jfs_xattr.c: extended attribute service
  37. *
  38. * Overall design --
  39. *
  40. * Format:
  41. *
  42. * Extended attribute lists (jfs_ea_list) consist of an overall size (32 bit
  43. * value) and a variable (0 or more) number of extended attribute
  44. * entries. Each extended attribute entry (jfs_ea) is a <name,value> double
  45. * where <name> is constructed from a null-terminated ascii string
  46. * (1 ... 255 bytes in the name) and <value> is arbitrary 8 bit data
  47. * (1 ... 65535 bytes). The in-memory format is
  48. *
  49. * 0 1 2 4 4 + namelen + 1
  50. * +-------+--------+--------+----------------+-------------------+
  51. * | Flags | Name | Value | Name String \0 | Data . . . . |
  52. * | | Length | Length | | |
  53. * +-------+--------+--------+----------------+-------------------+
  54. *
  55. * A jfs_ea_list then is structured as
  56. *
  57. * 0 4 4 + EA_SIZE(ea1)
  58. * +------------+-------------------+--------------------+-----
  59. * | Overall EA | First FEA Element | Second FEA Element | .....
  60. * | List Size | | |
  61. * +------------+-------------------+--------------------+-----
  62. *
  63. * On-disk:
  64. *
  65. * FEALISTs are stored on disk using blocks allocated by dbAlloc() and
  66. * written directly. An EA list may be in-lined in the inode if there is
  67. * sufficient room available.
  68. */
/*
 * struct ea_buffer: describes where an inode's extended-attribute list
 * currently lives while it is being read or rewritten (inode inline area,
 * on-disk extent mapped via a metapage, or a temporary kmalloc'd buffer).
 */
struct ea_buffer {
	int flag;		/* Indicates what storage xattr points to */
	int max_size;		/* largest xattr that fits in current buffer */
	dxd_t new_ea;		/* dxd to replace ea when modifying xattr */
	struct metapage *mp;	/* metapage containing ea list */
	struct jfs_ea_list *xattr;	/* buffer containing ea list */
};

/*
 * ea_buffer.flag values
 */
#define EA_INLINE	0x0001	/* list is in the inode's inline EA area */
#define EA_EXTENT	0x0002	/* list is in an on-disk extent (see ->mp) */
#define EA_NEW		0x0004	/* backing storage freshly allocated by ea_get() */
#define EA_MALLOC	0x0008	/* list is in a kmalloc'd buffer */
  83. /*
  84. * Mapping of on-disk attribute names: for on-disk attribute names with an
  85. * unknown prefix (not "system.", "user.", "security.", or "trusted."), the
  86. * prefix "os2." is prepended. On the way back to disk, "os2." prefixes are
  87. * stripped and we make sure that the remaining name does not start with one
  88. * of the known prefixes.
  89. */
  90. static int is_known_namespace(const char *name)
  91. {
  92. if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
  93. strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
  94. strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
  95. strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
  96. return false;
  97. return true;
  98. }
  99. static inline int name_size(struct jfs_ea *ea)
  100. {
  101. if (is_known_namespace(ea->name))
  102. return ea->namelen;
  103. else
  104. return ea->namelen + XATTR_OS2_PREFIX_LEN;
  105. }
  106. static inline int copy_name(char *buffer, struct jfs_ea *ea)
  107. {
  108. int len = ea->namelen;
  109. if (!is_known_namespace(ea->name)) {
  110. memcpy(buffer, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN);
  111. buffer += XATTR_OS2_PREFIX_LEN;
  112. len += XATTR_OS2_PREFIX_LEN;
  113. }
  114. memcpy(buffer, ea->name, ea->namelen);
  115. buffer[ea->namelen] = 0;
  116. return len;
  117. }
  118. /* Forward references */
  119. static void ea_release(struct inode *inode, struct ea_buffer *ea_buf);
/*
 * NAME:	ea_write_inline
 *
 * FUNCTION:	Attempt to write an EA inline if area is available
 *
 * PRE CONDITIONS:
 *	Already verified that the specified EA is small enough to fit inline
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- EA list pointer
 *	size	- size of ealist in bytes
 *	ea	- dxd_t structure to be filled in with necessary EA information
 *		  if we successfully copy the EA inline
 *
 * NOTES:
 *	Checks if the inode's inline area is available. If so, copies EA inline
 *	and sets <ea> fields appropriately. Otherwise, returns failure, EA will
 *	have to be put into an extent.
 *
 * RETURNS: 0 for successful copy to inline area; -EPERM if area not available
 */
static int ea_write_inline(struct inode *ip, struct jfs_ea_list *ealist,
			   int size, dxd_t * ea)
{
	struct jfs_inode_info *ji = JFS_IP(ip);

	/*
	 * Make sure we have an EA -- the NULL EA list is valid, but you
	 * can't copy it!
	 */
	if (ealist && size > sizeof (struct jfs_ea_list)) {
		assert(size <= sizeof (ji->i_inline_ea));

		/*
		 * See if the space is available or if it is already being
		 * used for an inline EA.
		 */
		if (!(ji->mode2 & INLINEEA) && !(ji->ea.flag & DXD_INLINE))
			return -EPERM;

		/* Inline descriptor: size only, no disk extent */
		DXDsize(ea, size);
		DXDlength(ea, 0);
		DXDaddress(ea, 0);
		memcpy(ji->i_inline_ea, ealist, size);
		ea->flag = DXD_INLINE;
		/* Inline area is now consumed by the EA list */
		ji->mode2 &= ~INLINEEA;
	} else {
		/* Empty (or NULL) EA list: record a zeroed descriptor */
		ea->flag = 0;
		DXDsize(ea, 0);
		DXDlength(ea, 0);
		DXDaddress(ea, 0);

		/* Free up INLINE area */
		if (ji->ea.flag & DXD_INLINE)
			ji->mode2 |= INLINEEA;
	}

	return 0;
}
/*
 * NAME:	ea_write
 *
 * FUNCTION:	Write an EA for an inode
 *
 * PRE CONDITIONS: EA has been verified
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- EA list pointer
 *	size	- size of ealist in bytes
 *	ea	- dxd_t structure to be filled in appropriately with where the
 *		  EA was copied
 *
 * NOTES: Will write EA inline if able to, otherwise allocates blocks for an
 *	extent and synchronously writes it to those blocks.
 *
 * RETURNS: 0 for success; Anything else indicates failure
 */
static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
		    dxd_t * ea)
{
	struct super_block *sb = ip->i_sb;
	struct jfs_inode_info *ji = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int nblocks;
	s64 blkno;
	int rc = 0, i;
	char *cp;
	s32 nbytes, nb;
	s32 bytes_to_write;
	struct metapage *mp;

	/*
	 * Quick check to see if this is an in-linable EA. Short EAs
	 * and empty EAs are all in-linable, provided the space exists.
	 */
	if (!ealist || size <= sizeof (ji->i_inline_ea)) {
		if (!ea_write_inline(ip, ealist, size, ea))
			return 0;
		/* inline area unavailable -- fall through to extent path */
	}

	/* figure out how many blocks we need */
	nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;

	/* Allocate new blocks to quota. */
	rc = dquot_alloc_block(ip, nblocks);
	if (rc)
		return rc;

	rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
	if (rc) {
		/*Rollback quota allocation. */
		dquot_free_block(ip, nblocks);
		return rc;
	}

	/*
	 * Now have nblocks worth of storage to stuff into the FEALIST.
	 * loop over the FEALIST copying data into the buffer one page at
	 * a time.
	 */
	cp = (char *) ealist;
	nbytes = size;
	for (i = 0; i < nblocks; i += sbi->nbperpage) {
		/*
		 * Determine how many bytes for this request, and round up to
		 * the nearest aggregate block size
		 */
		nb = min(PSIZE, nbytes);
		bytes_to_write =
		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
		    << sb->s_blocksize_bits;

		if (!(mp = get_metapage(ip, blkno + i, bytes_to_write, 1))) {
			rc = -EIO;
			goto failed;
		}

		memcpy(mp->data, cp, nb);

		/*
		 * We really need a way to propagate errors for
		 * forced writes like this one.  --hch
		 *
		 * (__write_metapage => release_metapage => flush_metapage)
		 */
#ifdef _JFS_FIXME
		if ((rc = flush_metapage(mp))) {
			/*
			 * the write failed -- this means that the buffer
			 * is still assigned and the blocks are not being
			 * used.  this seems like the best error recovery
			 * we can get ...
			 */
			goto failed;
		}
#else
		flush_metapage(mp);
#endif

		cp += PSIZE;
		nbytes -= nb;
	}

	/* Record the new extent in the caller-supplied descriptor */
	ea->flag = DXD_EXTENT;
	DXDsize(ea, le32_to_cpu(ealist->size));
	DXDlength(ea, nblocks);
	DXDaddress(ea, blkno);

	/* Free up INLINE area */
	if (ji->ea.flag & DXD_INLINE)
		ji->mode2 |= INLINEEA;

	return 0;

      failed:
	/* Rollback quota allocation. */
	dquot_free_block(ip, nblocks);

	dbFree(ip, blkno, nblocks);
	return rc;
}
/*
 * NAME:	ea_read_inline
 *
 * FUNCTION:	Read an inlined EA into user's buffer
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- Pointer to buffer to fill in with EA
 *
 * RETURNS: 0 for success; -EIO if the on-disk inline EA is inconsistent
 */
static int ea_read_inline(struct inode *ip, struct jfs_ea_list *ealist)
{
	struct jfs_inode_info *ji = JFS_IP(ip);
	int ea_size = sizeDXD(&ji->ea);

	if (ea_size == 0) {
		ealist->size = 0;
		return 0;
	}

	/* Sanity Check: descriptor must fit the inline area ... */
	if ((sizeDXD(&ji->ea) > sizeof (ji->i_inline_ea)))
		return -EIO;
	/* ... and must agree with the size stored in the list itself */
	if (le32_to_cpu(((struct jfs_ea_list *) &ji->i_inline_ea)->size)
	    != ea_size)
		return -EIO;

	memcpy(ealist, ji->i_inline_ea, ea_size);
	return 0;
}
/*
 * NAME:	ea_read
 *
 * FUNCTION:	copy EA data into user's buffer
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- Pointer to buffer to fill in with EA
 *
 * NOTES:  If EA is inline calls ea_read_inline() to copy EA.
 *
 * RETURNS: 0 for success; other indicates failure
 */
static int ea_read(struct inode *ip, struct jfs_ea_list *ealist)
{
	struct super_block *sb = ip->i_sb;
	struct jfs_inode_info *ji = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int nblocks;
	s64 blkno;
	char *cp = (char *) ealist;
	int i;
	int nbytes, nb;
	s32 bytes_to_read;
	struct metapage *mp;

	/* quick check for in-line EA */
	if (ji->ea.flag & DXD_INLINE)
		return ea_read_inline(ip, ealist);

	nbytes = sizeDXD(&ji->ea);
	if (!nbytes) {
		jfs_error(sb, "nbytes is 0\n");
		return -EIO;
	}

	/*
	 * Figure out how many blocks were allocated when this EA list was
	 * originally written to disk.
	 */
	nblocks = lengthDXD(&ji->ea) << sbi->l2nbperpage;
	blkno = addressDXD(&ji->ea) << sbi->l2nbperpage;

	/*
	 * I have found the disk blocks which were originally used to store
	 * the FEALIST.  now i loop over each contiguous block copying the
	 * data into the buffer.
	 */
	for (i = 0; i < nblocks; i += sbi->nbperpage) {
		/*
		 * Determine how many bytes for this request, and round up to
		 * the nearest aggregate block size
		 */
		nb = min(PSIZE, nbytes);
		bytes_to_read =
		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
		    << sb->s_blocksize_bits;

		if (!(mp = read_metapage(ip, blkno + i, bytes_to_read, 1)))
			return -EIO;

		memcpy(cp, mp->data, nb);
		release_metapage(mp);

		cp += PSIZE;
		nbytes -= nb;
	}

	return 0;
}
/*
 * NAME:	ea_get
 *
 * FUNCTION: Returns buffer containing existing extended attributes.
 *	The size of the buffer will be the larger of the existing
 *	attributes size, or min_size.
 *
 *	The buffer, which may be inlined in the inode or in the
 *	page cache must be released by calling ea_release or ea_put
 *
 * PARAMETERS:
 *	inode	- Inode pointer
 *	ea_buf	- Structure to be populated with ealist and its metadata
 *	min_size- minimum size of buffer to be returned
 *
 * RETURNS: the existing EA list size (>= 0) for success;
 *	negative errno for failure
 */
static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
{
	struct jfs_inode_info *ji = JFS_IP(inode);
	struct super_block *sb = inode->i_sb;
	int size;
	int ea_size = sizeDXD(&ji->ea);
	int blocks_needed, current_blocks;
	s64 blkno;
	int rc;
	int quota_allocation = 0;

	/* When fsck.jfs clears a bad ea, it doesn't clear the size */
	if (ji->ea.flag == 0)
		ea_size = 0;

	if (ea_size == 0) {
		if (min_size == 0) {
			/* No EA exists and none requested: empty result */
			ea_buf->flag = 0;
			ea_buf->max_size = 0;
			ea_buf->xattr = NULL;
			return 0;
		}
		if ((min_size <= sizeof (ji->i_inline_ea)) &&
		    (ji->mode2 & INLINEEA)) {
			/* New EA that fits in the free inline area */
			ea_buf->flag = EA_INLINE | EA_NEW;
			ea_buf->max_size = sizeof (ji->i_inline_ea);
			ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
			DXDlength(&ea_buf->new_ea, 0);
			DXDaddress(&ea_buf->new_ea, 0);
			ea_buf->new_ea.flag = DXD_INLINE;
			DXDsize(&ea_buf->new_ea, min_size);
			return 0;
		}
		current_blocks = 0;
	} else if (ji->ea.flag & DXD_INLINE) {
		if (min_size <= sizeof (ji->i_inline_ea)) {
			/* Existing inline EA still fits inline */
			ea_buf->flag = EA_INLINE;
			ea_buf->max_size = sizeof (ji->i_inline_ea);
			ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
			goto size_check;
		}
		/* Inline EA must grow into an extent */
		current_blocks = 0;
	} else {
		if (!(ji->ea.flag & DXD_EXTENT)) {
			jfs_error(sb, "invalid ea.flag\n");
			return -EIO;
		}
		current_blocks = (ea_size + sb->s_blocksize - 1) >>
		    sb->s_blocksize_bits;
	}
	size = max(min_size, ea_size);

	if (size > PSIZE) {
		/*
		 * To keep the rest of the code simple.  Allocate a
		 * contiguous buffer to work with. Make the buffer large
		 * enough to make use of the whole extent.
		 */
		ea_buf->max_size = (size + sb->s_blocksize - 1) &
		    ~(sb->s_blocksize - 1);

		ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
		if (ea_buf->xattr == NULL)
			return -ENOMEM;

		ea_buf->flag = EA_MALLOC;

		if (ea_size == 0)
			return 0;

		if ((rc = ea_read(inode, ea_buf->xattr))) {
			kfree(ea_buf->xattr);
			ea_buf->xattr = NULL;
			return rc;
		}
		goto size_check;
	}
	blocks_needed = (min_size + sb->s_blocksize - 1) >>
	    sb->s_blocksize_bits;

	if (blocks_needed > current_blocks) {
		/* Allocate new blocks to quota. */
		rc = dquot_alloc_block(inode, blocks_needed);
		if (rc)
			/* NOTE(review): returns -EDQUOT regardless of rc;
			 * presumably intentional -- confirm before changing */
			return -EDQUOT;

		quota_allocation = blocks_needed;

		rc = dbAlloc(inode, INOHINT(inode), (s64) blocks_needed,
			     &blkno);
		if (rc)
			goto clean_up;

		DXDlength(&ea_buf->new_ea, blocks_needed);
		DXDaddress(&ea_buf->new_ea, blkno);
		ea_buf->new_ea.flag = DXD_EXTENT;
		DXDsize(&ea_buf->new_ea, min_size);

		ea_buf->flag = EA_EXTENT | EA_NEW;

		ea_buf->mp = get_metapage(inode, blkno,
					  blocks_needed << sb->s_blocksize_bits,
					  1);
		if (ea_buf->mp == NULL) {
			dbFree(inode, blkno, (s64) blocks_needed);
			rc = -EIO;
			goto clean_up;
		}
		ea_buf->xattr = ea_buf->mp->data;
		ea_buf->max_size = (min_size + sb->s_blocksize - 1) &
		    ~(sb->s_blocksize - 1);
		if (ea_size == 0)
			return 0;
		if ((rc = ea_read(inode, ea_buf->xattr))) {
			discard_metapage(ea_buf->mp);
			dbFree(inode, blkno, (s64) blocks_needed);
			goto clean_up;
		}
		goto size_check;
	}
	/* Existing extent is large enough: map it directly */
	ea_buf->flag = EA_EXTENT;
	ea_buf->mp = read_metapage(inode, addressDXD(&ji->ea),
				   lengthDXD(&ji->ea) << sb->s_blocksize_bits,
				   1);
	if (ea_buf->mp == NULL) {
		rc = -EIO;
		goto clean_up;
	}
	ea_buf->xattr = ea_buf->mp->data;
	ea_buf->max_size = (ea_size + sb->s_blocksize - 1) &
	    ~(sb->s_blocksize - 1);

      size_check:
	/* Guard against corrupt on-disk lists before callers walk them */
	if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
		printk(KERN_ERR "ea_get: invalid extended attribute\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1,
			       ea_buf->xattr, ea_size, 1);
		ea_release(inode, ea_buf);
		rc = -EIO;
		goto clean_up;
	}

	return ea_size;

      clean_up:
	/* Rollback quota allocation */
	if (quota_allocation)
		dquot_free_block(inode, quota_allocation);

	return (rc);
}
  525. static void ea_release(struct inode *inode, struct ea_buffer *ea_buf)
  526. {
  527. if (ea_buf->flag & EA_MALLOC)
  528. kfree(ea_buf->xattr);
  529. else if (ea_buf->flag & EA_EXTENT) {
  530. assert(ea_buf->mp);
  531. release_metapage(ea_buf->mp);
  532. if (ea_buf->flag & EA_NEW)
  533. dbFree(inode, addressDXD(&ea_buf->new_ea),
  534. lengthDXD(&ea_buf->new_ea));
  535. }
  536. }
  537. static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf,
  538. int new_size)
  539. {
  540. struct jfs_inode_info *ji = JFS_IP(inode);
  541. unsigned long old_blocks, new_blocks;
  542. int rc = 0;
  543. if (new_size == 0) {
  544. ea_release(inode, ea_buf);
  545. ea_buf = NULL;
  546. } else if (ea_buf->flag & EA_INLINE) {
  547. assert(new_size <= sizeof (ji->i_inline_ea));
  548. ji->mode2 &= ~INLINEEA;
  549. ea_buf->new_ea.flag = DXD_INLINE;
  550. DXDsize(&ea_buf->new_ea, new_size);
  551. DXDaddress(&ea_buf->new_ea, 0);
  552. DXDlength(&ea_buf->new_ea, 0);
  553. } else if (ea_buf->flag & EA_MALLOC) {
  554. rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
  555. kfree(ea_buf->xattr);
  556. } else if (ea_buf->flag & EA_NEW) {
  557. /* We have already allocated a new dxd */
  558. flush_metapage(ea_buf->mp);
  559. } else {
  560. /* ->xattr must point to original ea's metapage */
  561. rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
  562. discard_metapage(ea_buf->mp);
  563. }
  564. if (rc)
  565. return rc;
  566. old_blocks = new_blocks = 0;
  567. if (ji->ea.flag & DXD_EXTENT) {
  568. invalidate_dxd_metapages(inode, ji->ea);
  569. old_blocks = lengthDXD(&ji->ea);
  570. }
  571. if (ea_buf) {
  572. txEA(tid, inode, &ji->ea, &ea_buf->new_ea);
  573. if (ea_buf->new_ea.flag & DXD_EXTENT) {
  574. new_blocks = lengthDXD(&ea_buf->new_ea);
  575. if (ji->ea.flag & DXD_INLINE)
  576. ji->mode2 |= INLINEEA;
  577. }
  578. ji->ea = ea_buf->new_ea;
  579. } else {
  580. txEA(tid, inode, &ji->ea, NULL);
  581. if (ji->ea.flag & DXD_INLINE)
  582. ji->mode2 |= INLINEEA;
  583. ji->ea.flag = 0;
  584. ji->ea.size = 0;
  585. }
  586. /* If old blocks exist, they must be removed from quota allocation. */
  587. if (old_blocks)
  588. dquot_free_block(inode, old_blocks);
  589. inode->i_ctime = current_time(inode);
  590. return 0;
  591. }
/*
 * Set, replace, or (value == NULL) remove the attribute @name on @inode
 * within transaction @tid.  The whole EA list is fetched with ea_get(),
 * edited in place, and written back with ea_put().  Serialized against
 * concurrent readers/writers by xattr_sem.
 *
 * Returns 0 for success; -EEXIST/-ENODATA for XATTR_CREATE/XATTR_REPLACE
 * violations, -E2BIG for oversized values, or other negative errno.
 */
int __jfs_setxattr(tid_t tid, struct inode *inode, const char *name,
		   const void *value, size_t value_len, int flags)
{
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea, *old_ea = NULL, *next_ea = NULL;
	struct ea_buffer ea_buf;
	int old_ea_size = 0;
	int xattr_size;
	int new_size;
	int namelen = strlen(name);
	int found = 0;
	int rc;
	int length;

	down_write(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);
	if (xattr_size < 0) {
		rc = xattr_size;
		goto out;
	}

      again:
	ealist = (struct jfs_ea_list *) ea_buf.xattr;
	new_size = sizeof (struct jfs_ea_list);

	/* Scan for an existing entry of this name; total up the size the
	 * list will have once that entry is removed. */
	if (xattr_size) {
		for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist);
		     ea = NEXT_EA(ea)) {
			if ((namelen == ea->namelen) &&
			    (memcmp(name, ea->name, namelen) == 0)) {
				found = 1;
				if (flags & XATTR_CREATE) {
					rc = -EEXIST;
					goto release;
				}
				old_ea = ea;
				old_ea_size = EA_SIZE(ea);
				next_ea = NEXT_EA(ea);
			} else
				new_size += EA_SIZE(ea);
		}
	}

	if (!found) {
		if (flags & XATTR_REPLACE) {
			rc = -ENODATA;
			goto release;
		}
		/* Removing a nonexistent attribute is a no-op success */
		if (value == NULL) {
			rc = 0;
			goto release;
		}
	}
	if (value)
		new_size += sizeof (struct jfs_ea) + namelen + 1 + value_len;

	if (new_size > ea_buf.max_size) {
		/*
		 * We need to allocate more space for merged ea list.
		 * We should only have loop to again: once.
		 */
		ea_release(inode, &ea_buf);
		xattr_size = ea_get(inode, &ea_buf, new_size);
		if (xattr_size < 0) {
			rc = xattr_size;
			goto out;
		}
		goto again;
	}

	/* Remove old ea of the same name */
	if (found) {
		/* number of bytes following target EA */
		length = (char *) END_EALIST(ealist) - (char *) next_ea;
		if (length > 0)
			memmove(old_ea, next_ea, length);
		xattr_size -= old_ea_size;
	}

	/* Add new entry to the end */
	if (value) {
		if (xattr_size == 0)
			/* Completely new ea list */
			xattr_size = sizeof (struct jfs_ea_list);

		/*
		 * The size of EA value is limited by on-disk format up to
		 *  __le16, there would be an overflow if the size is equal
		 * to XATTR_SIZE_MAX (65536).  In order to avoid this issue,
		 * we can pre-checkup the value size against USHRT_MAX, and
		 * return -E2BIG in this case, which is consistent with the
		 * VFS setxattr interface.
		 */
		if (value_len >= USHRT_MAX) {
			rc = -E2BIG;
			goto release;
		}

		ea = (struct jfs_ea *) ((char *) ealist + xattr_size);
		ea->flag = 0;
		ea->namelen = namelen;
		ea->valuelen = (cpu_to_le16(value_len));
		memcpy(ea->name, name, namelen);
		ea->name[namelen] = 0;
		if (value_len)
			memcpy(&ea->name[namelen + 1], value, value_len);
		xattr_size += EA_SIZE(ea);
	}

	/* DEBUG - If we did this right, these number match */
	if (xattr_size != new_size) {
		printk(KERN_ERR
		       "__jfs_setxattr: xattr_size = %d, new_size = %d\n",
		       xattr_size, new_size);
		rc = -EINVAL;
		goto release;
	}

	/*
	 * If we're left with an empty list, there's no ea
	 */
	if (new_size == sizeof (struct jfs_ea_list))
		new_size = 0;

	ealist->size = cpu_to_le32(new_size);

	rc = ea_put(tid, inode, &ea_buf, new_size);

	goto out;
      release:
	ea_release(inode, &ea_buf);
      out:
	up_write(&JFS_IP(inode)->xattr_sem);

	return rc;
}
/*
 * Look up attribute @name on @inode and copy its value into @data
 * (at most @buf_size bytes).  With @data == NULL only the value size is
 * reported, per the getxattr contract.
 *
 * Returns the value length, -ENODATA if absent, -ERANGE if @buf_size is
 * too small, or another negative errno from ea_get().
 */
ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
		       size_t buf_size)
{
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea;
	struct ea_buffer ea_buf;
	int xattr_size;
	ssize_t size;
	int namelen = strlen(name);
	char *value;

	down_read(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);

	if (xattr_size < 0) {
		size = xattr_size;
		goto out;
	}

	if (xattr_size == 0)
		goto not_found;

	ealist = (struct jfs_ea_list *) ea_buf.xattr;

	/* Find the named attribute */
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
		if ((namelen == ea->namelen) &&
		    memcmp(name, ea->name, namelen) == 0) {
			/* Found it */
			size = le16_to_cpu(ea->valuelen);
			if (!data)
				/* size-query only */
				goto release;
			else if (size > buf_size) {
				size = -ERANGE;
				goto release;
			}
			/* value starts just past the NUL-terminated name */
			value = ((char *) &ea->name) + ea->namelen + 1;
			memcpy(data, value, size);
			goto release;
		}
      not_found:
	size = -ENODATA;
      release:
	ea_release(inode, &ea_buf);
      out:
	up_read(&JFS_IP(inode)->xattr_sem);

	return size;
}
  756. /*
  757. * No special permissions are needed to list attributes except for trusted.*
  758. */
  759. static inline int can_list(struct jfs_ea *ea)
  760. {
  761. return (strncmp(ea->name, XATTR_TRUSTED_PREFIX,
  762. XATTR_TRUSTED_PREFIX_LEN) ||
  763. capable(CAP_SYS_ADMIN));
  764. }
/*
 * listxattr: fill @data with the NUL-separated names of all attributes the
 * caller may see (trusted.* requires CAP_SYS_ADMIN; see can_list()).
 * With @data == NULL only the required buffer size is returned.
 *
 * Returns total bytes (written or required), -ERANGE if @buf_size is too
 * small, or a negative errno from ea_get().
 */
ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
{
	struct inode *inode = d_inode(dentry);
	char *buffer;
	ssize_t size = 0;
	int xattr_size;
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea;
	struct ea_buffer ea_buf;

	down_read(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);
	if (xattr_size < 0) {
		size = xattr_size;
		goto out;
	}

	if (xattr_size == 0)
		goto release;

	ealist = (struct jfs_ea_list *) ea_buf.xattr;

	/* compute required size of list */
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
		if (can_list(ea))
			/* +1 for the NUL separator after each name */
			size += name_size(ea) + 1;
	}

	if (!data)
		goto release;

	if (size > buf_size) {
		size = -ERANGE;
		goto release;
	}

	/* Copy attribute names to buffer */
	buffer = data;
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
		if (can_list(ea)) {
			int namelen = copy_name(buffer, ea);
			buffer += namelen + 1;
		}
	}

      release:
	ea_release(inode, &ea_buf);
      out:
	up_read(&JFS_IP(inode)->xattr_sem);
	return size;
}
/*
 * Run __jfs_setxattr() inside its own JFS transaction, committing on
 * success; serialized against other commits via the inode's commit_mutex.
 */
static int __jfs_xattr_set(struct inode *inode, const char *name,
			   const void *value, size_t size, int flags)
{
	struct jfs_inode_info *ji = JFS_IP(inode);
	tid_t tid;
	int rc;

	tid = txBegin(inode->i_sb, 0);
	mutex_lock(&ji->commit_mutex);
	rc = __jfs_setxattr(tid, inode, name, value, size, flags);
	if (!rc)
		rc = txCommit(tid, 1, &inode, 0);
	txEnd(tid);
	mutex_unlock(&ji->commit_mutex);

	return rc;
}
  823. static int jfs_xattr_get(const struct xattr_handler *handler,
  824. struct dentry *unused, struct inode *inode,
  825. const char *name, void *value, size_t size)
  826. {
  827. name = xattr_full_name(handler, name);
  828. return __jfs_getxattr(inode, name, value, size);
  829. }
  830. static int jfs_xattr_set(const struct xattr_handler *handler,
  831. struct dentry *unused, struct inode *inode,
  832. const char *name, const void *value,
  833. size_t size, int flags)
  834. {
  835. name = xattr_full_name(handler, name);
  836. return __jfs_xattr_set(inode, name, value, size, flags);
  837. }
  838. static int jfs_xattr_get_os2(const struct xattr_handler *handler,
  839. struct dentry *unused, struct inode *inode,
  840. const char *name, void *value, size_t size)
  841. {
  842. if (is_known_namespace(name))
  843. return -EOPNOTSUPP;
  844. return __jfs_getxattr(inode, name, value, size);
  845. }
  846. static int jfs_xattr_set_os2(const struct xattr_handler *handler,
  847. struct dentry *unused, struct inode *inode,
  848. const char *name, const void *value,
  849. size_t size, int flags)
  850. {
  851. if (is_known_namespace(name))
  852. return -EOPNOTSUPP;
  853. return __jfs_xattr_set(inode, name, value, size, flags);
  854. }
/* Handlers for the namespaces JFS stores natively */
static const struct xattr_handler jfs_user_xattr_handler = {
	.prefix = XATTR_USER_PREFIX,
	.get = jfs_xattr_get,
	.set = jfs_xattr_set,
};

/* OS/2-compatibility namespace: extra validation in the os2 callbacks */
static const struct xattr_handler jfs_os2_xattr_handler = {
	.prefix = XATTR_OS2_PREFIX,
	.get = jfs_xattr_get_os2,
	.set = jfs_xattr_set_os2,
};

static const struct xattr_handler jfs_security_xattr_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.get = jfs_xattr_get,
	.set = jfs_xattr_set,
};

static const struct xattr_handler jfs_trusted_xattr_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.get = jfs_xattr_get,
	.set = jfs_xattr_set,
};

/* NULL-terminated table registered with the VFS via the superblock */
const struct xattr_handler *jfs_xattr_handlers[] = {
#ifdef CONFIG_JFS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&jfs_os2_xattr_handler,
	&jfs_user_xattr_handler,
	&jfs_security_xattr_handler,
	&jfs_trusted_xattr_handler,
	NULL,
};
#ifdef CONFIG_JFS_SECURITY
/*
 * LSM callback: store each security xattr the LSM requests for a new
 * inode, prefixing each name with "security." before calling
 * __jfs_setxattr() under the transaction passed via @fs_info.
 *
 * Returns 0 for success or the first negative errno encountered.
 */
static int jfs_initxattrs(struct inode *inode, const struct xattr *xattr_array,
			  void *fs_info)
{
	const struct xattr *xattr;
	tid_t *tid = fs_info;
	char *name;
	int err = 0;

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		/* "security." + suffix + NUL */
		name = kmalloc(XATTR_SECURITY_PREFIX_LEN +
			       strlen(xattr->name) + 1, GFP_NOFS);
		if (!name) {
			err = -ENOMEM;
			break;
		}
		strcpy(name, XATTR_SECURITY_PREFIX);
		strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name);

		err = __jfs_setxattr(*tid, inode, name,
				     xattr->value, xattr->value_len, 0);
		kfree(name);
		if (err < 0)
			break;
	}
	return err;
}

/*
 * Ask the security module to initialize the security xattrs of a newly
 * created @inode (in directory @dir) within transaction @tid.
 */
int jfs_init_security(tid_t tid, struct inode *inode, struct inode *dir,
		      const struct qstr *qstr)
{
	return security_inode_init_security(inode, dir, qstr,
					    &jfs_initxattrs, &tid);
}
#endif