linux-2.6.22/fs/xfs/xfs_bmap.c: xfs_bmapi()

int					/* error */
xfs_bmapi(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode */
	xfs_fileoff_t	bno,		/* starting file offs. mapped */
	xfs_filblks_t	len,		/* length to map in file */
	int		flags,		/* XFS_BMAPI_... */
	xfs_fsblock_t	*firstblock,	/* first allocated block
					   controls a.g. for allocs */
	xfs_extlen_t	total,		/* total blocks needed */
	xfs_bmbt_irec_t	*mval,		/* output: map values */
	int		*nmap,		/* i/o: mval size/count */
	xfs_bmap_free_t	*flist,		/* i/o: list extents to free */
	xfs_extdelta_t	*delta)		/* o: change made to incore extents */
{
	xfs_fsblock_t	abno;		/* allocated block number */
	xfs_extlen_t	alen;		/* allocated extent length */
	xfs_fileoff_t	aoff;		/* allocated file offset */
	xfs_bmalloca_t	bma;		/* args for xfs_bmap_alloc */
	xfs_btree_cur_t	*cur;		/* bmap btree cursor */
	xfs_fileoff_t	end;		/* end of mapped file region */
	int		eof;		/* we've hit the end of extents */
	xfs_bmbt_rec_t	*ep;		/* extent record pointer */
	int		error;		/* error return */
	xfs_bmbt_irec_t	got;		/* current file extent record */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	xfs_extlen_t	indlen;		/* indirect blocks length */
	xfs_extnum_t	lastx;		/* last useful extent number */
	int		logflags;	/* flags for transaction logging */
	xfs_extlen_t	minleft;	/* min blocks left after allocation */
	xfs_extlen_t	minlen;		/* min allocation size */
	xfs_mount_t	*mp;		/* xfs mount structure */
	int		n;		/* current extent index */
	int		nallocs;	/* number of extents alloc'd */
	xfs_extnum_t	nextents;	/* number of extents in file */
	xfs_fileoff_t	obno;		/* old block number (offset) */
	xfs_bmbt_irec_t	prev;		/* previous file extent record */
	int		tmp_logflags;	/* temp flags holder */
	int		whichfork;	/* data or attr fork */
	char		inhole;		/* current location is hole in file */
	char		wasdelay;	/* old extent was delayed */
	char		wr;		/* this is a write request */
	char		rt;		/* this is a realtime file */
#ifdef DEBUG
	xfs_fileoff_t	orig_bno;	/* original block number value */
	int		orig_flags;	/* original flags arg value */
	xfs_filblks_t	orig_len;	/* original value of len arg */
	xfs_bmbt_irec_t	*orig_mval;	/* original value of mval */
	int		orig_nmap;	/* original value of *nmap */

	orig_bno = bno;
	orig_len = len;
	orig_flags = flags;
	orig_mval = mval;
	orig_nmap = *nmap;
#endif
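	/*
	 * Sanity-check the caller's arguments and the fork format before
	 * doing any work; a corrupt fork format is reported and rejected.
	 */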
	ASSERT(*nmap >= 1);
	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP || !(flags & XFS_BMAPI_WRITE));
	whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
		XFS_ATTR_FORK : XFS_DATA_FORK;
	mp = ip->i_mount;
	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmapi", XFS_ERRLEVEL_LOW, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);
	rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_ext_max ==
	       XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
	if ((wr = (flags & XFS_BMAPI_WRITE)) != 0)
		XFS_STATS_INC(xs_blk_mapw);
	else
		XFS_STATS_INC(xs_blk_mapr);
	/*
	 * The IGSTATE flag is used to combine extents which
	 * differ only in the state of the extents.
	 * This technique is used by xfs_getbmap()
	 * when the caller does not wish to see the
	 * separation (which is the default).
	 *
	 * This technique is also used when writing a
	 * buffer which has been partially written
	 * (usually by being flushed during a chunkread),
	 * to ensure one write takes place.  It also
	 * intentionally prevents the xfs inode extents
	 * from changing at this time.  The change occurs
	 * on completion of the write operation, in
	 * xfs_strat_comp(), where the xfs_bmapi() call
	 * is transactioned and the extents combined.
	 */
	if ((flags & XFS_BMAPI_IGSTATE) && wr)	/* if writing unwritten space */
		wr = 0;				/* no allocations are allowed */
	ASSERT(wr || !(flags & XFS_BMAPI_DELAY));
	logflags = 0;
	nallocs = 0;
	cur = NULL;
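	/*
	 * A local-format fork keeps its data inline in the inode; convert it
	 * to extent format before we can map or allocate blocks in it.
	 */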
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		ASSERT(wr && tp);
		if ((error = xfs_bmap_local_to_extents(tp, ip,
				firstblock, total, &logflags, whichfork)))
			goto error0;
	}
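	/*
	 * For a write where no allocation group has been chosen yet, work
	 * out how many blocks must be left free in the AG so the bmap btree
	 * can still grow after this allocation.
	 */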
	if (wr && *firstblock == NULLFSBLOCK) {
		if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
			minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
		else
			minleft = 1;
	} else
		minleft = 0;
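	/*
	 * Make sure the in-core extent list is read in, then find the extent
	 * containing (or following) the starting offset.
	 */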
	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
	    (error = xfs_iread_extents(tp, ip, whichfork)))
		goto error0;
	ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
		&prev);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	n = 0;
	end = bno + len;
	obno = bno;
	bma.ip = NULL;
	if (delta) {
		delta->xed_startoff = NULLFILEOFF;
		delta->xed_blockcount = 0;
	}
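	/*
	 * Main loop: walk the range [bno, end), filling in up to *nmap
	 * mappings.  For writes, holes and delayed allocations are turned
	 * into delayed or real allocations along the way.
	 */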
	while (bno < end && n < *nmap) {
		/*
		 * Reading past eof, act as though there's a hole
		 * up to end.
		 */
		if (eof && !wr)
			got.br_startoff = end;
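		/*
		 * inhole: the current offset falls before the next extent
		 * (or past eof).  wasdelay: a real-allocation write is
		 * overlapping an existing delayed-allocation extent.
		 */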
		inhole = eof || got.br_startoff > bno;
		wasdelay = wr && !inhole && !(flags & XFS_BMAPI_DELAY) &&
			ISNULLSTARTBLOCK(got.br_startblock);
		/*
		 * First, deal with the hole before the allocated space
		 * that we found, if any.
		 */
		if (wr && (inhole || wasdelay)) {
			/*
			 * For the wasdelay case, we could also just
			 * allocate the range asked for in this bmap call,
			 * but that wouldn't be as good.
			 */
			if (wasdelay && !(flags & XFS_BMAPI_EXACT)) {
				alen = (xfs_extlen_t)got.br_blockcount;
				aoff = got.br_startoff;
				if (lastx != NULLEXTNUM && lastx) {
					ep = xfs_iext_get_ext(ifp, lastx - 1);
					xfs_bmbt_get_all(ep, &prev);
				}
			} else if (wasdelay) {
				alen = (xfs_extlen_t)
					XFS_FILBLKS_MIN(len,
						(got.br_startoff +
						 got.br_blockcount) - bno);
				aoff = bno;
			} else {
				alen = (xfs_extlen_t)
					XFS_FILBLKS_MIN(len, MAXEXTLEN);
				if (!eof)
					alen = (xfs_extlen_t)
						XFS_FILBLKS_MIN(alen,
							got.br_startoff - bno);
				aoff = bno;
			}
			minlen = (flags & XFS_BMAPI_CONTIG) ? alen : 1;
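			/*
			 * Delayed allocation: reserve quota and in-core free
			 * space (data blocks plus worst-case indirect blocks)
			 * now, and record the extent as delayed; real blocks
			 * are allocated later at writeback time.
			 */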
			if (flags & XFS_BMAPI_DELAY) {
				xfs_extlen_t	extsz;

				/* Figure out the extent size, adjust alen */
				if (rt) {
					if (!(extsz = ip->i_d.di_extsize))
						extsz = mp->m_sb.sb_rextsize;
				} else {
					extsz = ip->i_d.di_extsize;
				}
				if (extsz) {
					error = xfs_bmap_extsize_align(mp,
							&got, &prev, extsz,
							rt, eof,
							flags&XFS_BMAPI_DELAY,
							flags&XFS_BMAPI_CONVERT,
							&aoff, &alen);
					ASSERT(!error);
				}

				if (rt)
					extsz = alen / mp->m_sb.sb_rextsize;

				/*
				 * Make a transaction-less quota reservation for
				 * delayed allocation blocks.  This number gets
				 * adjusted later.  We return an error only if
				 * we haven't already allocated blocks in this
				 * loop; otherwise we stop and report what we
				 * have so far.
				 */
				if ((error = XFS_TRANS_RESERVE_QUOTA_NBLKS(
						mp, NULL, ip, (long)alen, 0,
						rt ? XFS_QMOPT_RES_RTBLKS :
						     XFS_QMOPT_RES_REGBLKS))) {
					if (n == 0) {
						*nmap = 0;
						ASSERT(cur == NULL);
						return error;
					}
					break;
				}

				/*
				 * Update the in-core superblock counters for
				 * alen and indlen separately, since they may
				 * come from different pools (realtime extents
				 * vs. regular free blocks).
				 */
				indlen = (xfs_extlen_t)
					xfs_bmap_worst_indlen(ip, alen);
				ASSERT(indlen > 0);

				if (rt) {
					error = xfs_mod_incore_sb(mp,
							XFS_SBS_FREXTENTS,
							-((int64_t)extsz), (flags &
							XFS_BMAPI_RSVBLOCKS));
				} else {
					error = xfs_mod_incore_sb(mp,
							XFS_SBS_FDBLOCKS,
							-((int64_t)alen), (flags &
							XFS_BMAPI_RSVBLOCKS));
				}
				if (!error) {
					error = xfs_mod_incore_sb(mp,
							XFS_SBS_FDBLOCKS,
							-((int64_t)indlen), (flags &
							XFS_BMAPI_RSVBLOCKS));
					if (error && rt)
						xfs_mod_incore_sb(mp,
							XFS_SBS_FREXTENTS,
							(int64_t)extsz, (flags &
							XFS_BMAPI_RSVBLOCKS));
					else if (error)
						xfs_mod_incore_sb(mp,
							XFS_SBS_FDBLOCKS,
							(int64_t)alen, (flags &
							XFS_BMAPI_RSVBLOCKS));
				}

				if (error) {
					if (XFS_IS_QUOTA_ON(mp))
						/* unreserve the blocks now */
						(void)
						XFS_TRANS_UNRESERVE_QUOTA_NBLKS(
							mp, NULL, ip,
							(long)alen, 0, rt ?
							XFS_QMOPT_RES_RTBLKS :
							XFS_QMOPT_RES_REGBLKS);
					break;
				}

				ip->i_delayed_blks += alen;
				abno = NULLSTARTBLOCK(indlen);
			} else {
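				/*
				 * Real allocation: set up the xfs_bmalloca
				 * arguments and call xfs_bmap_alloc() to get
				 * actual disk blocks.
				 */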
				/*
				 * If first time, allocate and fill in
				 * once-only bma fields.
				 */
				if (bma.ip == NULL) {
					bma.tp = tp;
					bma.ip = ip;
					bma.prevp = &prev;
					bma.gotp = &got;
					bma.total = total;
					bma.userdata = 0;
				}
				/*
				 * Indicate if this is the first user data
				 * in the file, or just any user data.
				 */
				if (!(flags & XFS_BMAPI_METADATA)) {
					bma.userdata = (aoff == 0) ?
						XFS_ALLOC_INITIAL_USER_DATA :
						XFS_ALLOC_USERDATA;
				}
				/*
				 * Fill in changeable bma fields.
				 */
				bma.eof = eof;
				bma.firstblock = *firstblock;
				bma.alen = alen;
				bma.off = aoff;
				bma.conv = !!(flags & XFS_BMAPI_CONVERT);
				bma.wasdel = wasdelay;
				bma.minlen = minlen;
				bma.low = flist->xbf_low;
				bma.minleft = minleft;
				/*
				 * Only want to do the alignment at the
				 * eof if it is userdata and allocation length
				 * is larger than a stripe unit.
				 */
				if (mp->m_dalign && alen >= mp->m_dalign &&
				    (!(flags & XFS_BMAPI_METADATA)) &&
				    (whichfork == XFS_DATA_FORK)) {
					if ((error = xfs_bmap_isaeof(ip, aoff,
							whichfork, &bma.aeof)))
						goto error0;
				} else
					bma.aeof = 0;
				/*
				 * Call allocator.
				 */
				if ((error = xfs_bmap_alloc(&bma)))
					goto error0;
				/*
				 * Copy out result fields.
				 */
				abno = bma.rval;
				if ((flist->xbf_low = bma.low))
					minleft = 0;
				alen = bma.alen;
				aoff = bma.off;
				ASSERT(*firstblock == NULLFSBLOCK ||
				       XFS_FSB_TO_AGNO(mp, *firstblock) ==
				       XFS_FSB_TO_AGNO(mp, bma.firstblock) ||
				       (flist->xbf_low &&
					XFS_FSB_TO_AGNO(mp, *firstblock) <
					XFS_FSB_TO_AGNO(mp, bma.firstblock)));
				*firstblock = bma.firstblock;
				if (cur)
					cur->bc_private.b.firstblock =
						*firstblock;
				if (abno == NULLFSBLOCK)
					break;
				if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
					cur = xfs_btree_init_cursor(mp,
						tp, NULL, 0, XFS_BTNUM_BMAP,
						ip, whichfork);
					cur->bc_private.b.firstblock =
						*firstblock;
					cur->bc_private.b.flist = flist;
				}
				/*
				 * Bump the number of extents we've allocated
				 * in this call.
				 */
				nallocs++;
			}
			if (cur)
				cur->bc_private.b.flags =
					wasdelay ? XFS_BTCUR_BPRV_WASDEL : 0;
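			/*
			 * Describe the new allocation as an in-core extent
			 * record and let xfs_bmap_add_extent() insert it,
			 * merging with neighbouring extents where possible.
			 */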
			got.br_startoff = aoff;
			got.br_startblock = abno;
			got.br_blockcount = alen;
			got.br_state = XFS_EXT_NORM;	/* assume normal */
			/*
			 * Determine state of extent, and the filesystem.
			 * A wasdelay extent has been initialized, so
			 * shouldn't be flagged as unwritten.
			 */
			if (wr && XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) {
				if (!wasdelay && (flags & XFS_BMAPI_PREALLOC))
					got.br_state = XFS_EXT_UNWRITTEN;
			}
			error = xfs_bmap_add_extent(ip, lastx, &cur, &got,
				firstblock, flist, &tmp_logflags, delta,
				whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
			logflags |= tmp_logflags;
			if (error)
				goto error0;
			lastx = ifp->if_lastex;
			ep = xfs_iext_get_ext(ifp, lastx);
			nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
			xfs_bmbt_get_all(ep, &got);
			ASSERT(got.br_startoff <= aoff);
			ASSERT(got.br_startoff + got.br_blockcount >=
				aoff + alen);
#ifdef DEBUG
			if (flags & XFS_BMAPI_DELAY) {
				ASSERT(ISNULLSTARTBLOCK(got.br_startblock));
				ASSERT(STARTBLOCKVAL(got.br_startblock) > 0);
			}
			ASSERT(got.br_state == XFS_EXT_NORM ||
			       got.br_state == XFS_EXT_UNWRITTEN);
#endif
			/*
			 * Fall down into the found allocated space case.
			 */
		} else if (inhole) {
			/*
			 * Reading in a hole.
			 */
			mval->br_startoff = bno;
			mval->br_startblock = HOLESTARTBLOCK;
			mval->br_blockcount =
				XFS_FILBLKS_MIN(len, got.br_startoff - bno);
			mval->br_state = XFS_EXT_NORM;
			bno += mval->br_blockcount;
			len -= mval->br_blockcount;
			mval++;
			n++;
			continue;
		}
		/*
		 * Then deal with the allocated space we found.
		 */
		ASSERT(ep != NULL);
		if (!(flags & XFS_BMAPI_ENTIRE) &&
		    (got.br_startoff + got.br_blockcount > obno)) {
			if (obno > bno)
				bno = obno;
			ASSERT((bno >= obno) || (n == 0));
			ASSERT(bno < end);
			mval->br_startoff = bno;
			if (ISNULLSTARTBLOCK(got.br_startblock)) {
				ASSERT(!wr || (flags & XFS_BMAPI_DELAY));
				mval->br_startblock = DELAYSTARTBLOCK;
			} else
				mval->br_startblock =
					got.br_startblock +
					(bno - got.br_startoff);
			/*
			 * Return the minimum of what we got and what we
			 * asked for as the length.  We can use the len
			 * variable here because it is modified below
			 * and we could have been there before coming
			 * here if the first part of the allocation
			 * didn't overlap what was asked for.
			 */
			mval->br_blockcount =
				XFS_FILBLKS_MIN(end - bno, got.br_blockcount -
					(bno - got.br_startoff));
			mval->br_state = got.br_state;
			ASSERT(mval->br_blockcount <= len);
		} else {
			*mval = got;
			if (ISNULLSTARTBLOCK(mval->br_startblock)) {
				ASSERT(!wr || (flags & XFS_BMAPI_DELAY));
				mval->br_startblock = DELAYSTARTBLOCK;
			}
		}

		/*
		 * Check if writing previously allocated but
		 * unwritten extents.
		 */
		if (wr && mval->br_state == XFS_EXT_UNWRITTEN &&
		    ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_DELAY)) == 0)) {
			/*
			 * Modify (by adding) the state flag, if writing.
			 */
			ASSERT(mval->br_blockcount <= len);
			if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
				cur = xfs_btree_init_cursor(mp,
					tp, NULL, 0, XFS_BTNUM_BMAP,
					ip, whichfork);
				cur->bc_private.b.firstblock =
					*firstblock;
				cur->bc_private.b.flist = flist;
			}
			mval->br_state = XFS_EXT_NORM;
			error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
				firstblock, flist, &tmp_logflags, delta,
				whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
			logflags |= tmp_logflags;
			if (error)
				goto error0;
			lastx = ifp->if_lastex;
			ep = xfs_iext_get_ext(ifp, lastx);
			nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
			xfs_bmbt_get_all(ep, &got);
			/*
			 * We may have combined previously unwritten
			 * space with written space, so generate
			 * another request.
			 */
			if (mval->br_blockcount < len)
				continue;
		}

		ASSERT((flags & XFS_BMAPI_ENTIRE) ||
		       ((mval->br_startoff + mval->br_blockcount) <= end));
		ASSERT((flags & XFS_BMAPI_ENTIRE) ||
		       (mval->br_blockcount <= len) ||
		       (mval->br_startoff < obno));
		bno = mval->br_startoff + mval->br_blockcount;
		len = end - bno;
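		/*
		 * Try to merge this mapping into the previous mval entry:
		 * either it repeats the same start offset, or it is
		 * physically contiguous with a compatible state, or both are
		 * delayed allocations that are logically adjacent.  Otherwise
		 * move on to the next mval slot.
		 */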
		if (n > 0 && mval->br_startoff == mval[-1].br_startoff) {
			ASSERT(mval->br_startblock == mval[-1].br_startblock);
			ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
			ASSERT(mval->br_state == mval[-1].br_state);
			mval[-1].br_blockcount = mval->br_blockcount;
			mval[-1].br_state = mval->br_state;
		} else if (n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
			   mval[-1].br_startblock != DELAYSTARTBLOCK &&
			   mval[-1].br_startblock != HOLESTARTBLOCK &&
			   mval->br_startblock ==
			   mval[-1].br_startblock + mval[-1].br_blockcount &&
			   ((flags & XFS_BMAPI_IGSTATE) ||
				mval[-1].br_state == mval->br_state)) {
			ASSERT(mval->br_startoff ==
			       mval[-1].br_startoff + mval[-1].br_blockcount);
			mval[-1].br_blockcount += mval->br_blockcount;
		} else if (n > 0 &&
			   mval->br_startblock == DELAYSTARTBLOCK &&
			   mval[-1].br_startblock == DELAYSTARTBLOCK &&
			   mval->br_startoff ==
			   mval[-1].br_startoff + mval[-1].br_blockcount) {
			mval[-1].br_blockcount += mval->br_blockcount;
			mval[-1].br_state = mval->br_state;
		} else if (!((n == 0) &&
			     ((mval->br_startoff + mval->br_blockcount) <=
			      obno))) {
			mval++;
			n++;
		}
		/*
		 * If we're done, stop now.  Stop when we've allocated
		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
		 * the transaction may get too big.
		 */
		if (bno >= end || n >= *nmap || nallocs >= *nmap)
			break;
		/*
		 * Else go on to the next record.
		 */
		ep = xfs_iext_get_ext(ifp, ++lastx);
		if (lastx >= nextents) {
			eof = 1;
			prev = got;
		} else
			xfs_bmbt_get_all(ep, &got);
	}
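	/*
	 * Record where the scan ended and tell the caller how many mappings
	 * were filled in.
	 */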
	ifp->if_lastex = lastx;
	*nmap = n;
	/*
	 * Transform from btree to extents, give it cur.
	 */
	if (tp && XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
		ASSERT(wr && cur);
		error = xfs_bmap_btree_to_extents(tp, ip, cur,
			&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
		if (error)
			goto error0;
	}
	ASSERT(ifp->if_ext_max ==
	       XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max);
	error = 0;
	if (delta && delta->xed_startoff != NULLFILEOFF) {
		/*
		 * A change was actually made.
		 * Note that delta->xed_blockcount is an offset at this
		 * point and needs to be converted to a block count.
		 */
		ASSERT(delta->xed_blockcount > delta->xed_startoff);
		delta->xed_blockcount -= delta->xed_startoff;
	}
error0:
	/*
	 * Log everything.  Do this after conversion, there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((logflags & XFS_ILOG_FEXT(whichfork)) &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~XFS_ILOG_FEXT(whichfork);
	else if ((logflags & XFS_ILOG_FBROOT(whichfork)) &&
		 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
		logflags &= ~XFS_ILOG_FBROOT(whichfork);
	/*
	 * Log whatever the flags say, even if error.  Otherwise we might miss
	 * detecting a case where the data is changed, there's an error,
	 * and it's not logged so we don't shutdown when we should.
	 */
	if (logflags) {
		ASSERT(tp && wr);
		xfs_trans_log_inode(tp, ip, logflags);
	}
	if (cur) {
		if (!error) {
			ASSERT(*firstblock == NULLFSBLOCK ||
			       XFS_FSB_TO_AGNO(mp, *firstblock) ==
			       XFS_FSB_TO_AGNO(mp,
				       cur->bc_private.b.firstblock) ||
			       (flist->xbf_low &&
				XFS_FSB_TO_AGNO(mp, *firstblock) <
				XFS_FSB_TO_AGNO(mp,
					cur->bc_private.b.firstblock)));
			*firstblock = cur->bc_private.b.firstblock;
		}
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	if (!error)
		xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
			orig_nmap, *nmap);
	return error;
}
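
Below is a minimal, hedged sketch of a read-only mapping call into xfs_bmapi(),
loosely modeled on the read-mapping callers of this era (e.g. the xfs_iomap
paths).  The helper name xfs_example_read_map is hypothetical, and the usual
XFS internal headers, inode locking, and post-processing of the returned
mappings are assumed and omitted.

int					/* error */
xfs_example_read_map(
	xfs_inode_t	*ip,		/* incore inode to map */
	xfs_fileoff_t	offset_fsb,	/* first file block to map */
	xfs_filblks_t	count_fsb)	/* number of file blocks to map */
{
	xfs_bmbt_irec_t	imap[XFS_BMAP_MAX_NMAP];	/* returned mappings */
	xfs_fsblock_t	firstblock = NULLFSBLOCK;	/* not used on reads */
	int		nimaps = XFS_BMAP_MAX_NMAP;	/* in: room, out: used */
	int		error;

	/*
	 * Read-only lookup: no transaction, no flags, no free list and no
	 * delta are needed.  Holes come back as HOLESTARTBLOCK entries and
	 * delayed allocations as DELAYSTARTBLOCK entries in imap[].
	 */
	error = xfs_bmapi(NULL, ip, offset_fsb, count_fsb, 0,
			  &firstblock, 0, imap, &nimaps, NULL, NULL);
	if (error)
		return error;

	/* nimaps now says how many imap[] entries were actually filled in. */
	return 0;
}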
