0001: /*-
0002: * See the file LICENSE for redistribution information.
0003: *
0004: * Copyright (c) 2002,2008 Oracle. All rights reserved.
0005: *
0006: * $Id: FileManager.java,v 1.162.2.6 2008/01/07 15:14:13 cwl Exp $
0007: */
0008:
0009: package com.sleepycat.je.log;
0010:
0011: import java.io.File;
0012: import java.io.FileNotFoundException;
0013: import java.io.IOException;
0014: import java.io.RandomAccessFile;
0015: import java.nio.ByteBuffer;
0016: import java.nio.channels.ClosedChannelException;
0017: import java.nio.channels.FileChannel;
0018: import java.nio.channels.FileLock;
0019: import java.nio.channels.OverlappingFileLockException;
0020: import java.util.Arrays;
0021: import java.util.HashMap;
0022: import java.util.Hashtable;
0023: import java.util.Iterator;
0024: import java.util.LinkedList;
0025: import java.util.Map;
0026: import java.util.Random;
0027: import java.util.Set;
0028:
0029: import com.sleepycat.je.DatabaseException;
0030: import com.sleepycat.je.EnvironmentStats;
0031: import com.sleepycat.je.RunRecoveryException;
0032: import com.sleepycat.je.StatsConfig;
0033: import com.sleepycat.je.config.EnvironmentParams;
0034: import com.sleepycat.je.dbi.DbConfigManager;
0035: import com.sleepycat.je.dbi.EnvironmentImpl;
0036: import com.sleepycat.je.latch.Latch;
0037: import com.sleepycat.je.latch.LatchSupport;
0038: import com.sleepycat.je.log.entry.LogEntry;
0039: import com.sleepycat.je.log.entry.SingleItemEntry;
0040: import com.sleepycat.je.utilint.DbLsn;
0041: import com.sleepycat.je.utilint.HexFormatter;
0042:
0043: /**
0044: * The FileManager presents the abstraction of one contiguous file. It doles
0045: * out LSNs.
0046: */
0047: public class FileManager {
0048:
0049: /*
0050: * public for unit tests.
0051: */
0052: public static class FileMode {
0053: public static final FileMode READ_MODE = new FileMode("r",
0054: false);
0055: public static final FileMode READWRITE_MODE = new FileMode(
0056: "rw", true);
0057: public static final FileMode READWRITE_ODSYNC_MODE = new FileMode(
0058: "rwd", true);
0059:
0060: /* Not used, but included for posterity and possible future use. */
0061: public static final FileMode READWRITE_OSYNC_MODE = new FileMode(
0062: "rws", true);
0063:
0064: private String fileModeValue;
0065: private boolean isWritable;
0066:
0067: private FileMode(String fileModeValue, boolean isWritable) {
0068: this .fileModeValue = fileModeValue;
0069: this .isWritable = isWritable;
0070: }
0071:
0072: public boolean getIsWritable() {
0073: return isWritable;
0074: }
0075:
0076: public String getModeValue() {
0077: return fileModeValue;
0078: }
0079: }
0080:
/* Test hooks: when set by unit tests, reads/writes throw simulated IOExceptions. */
static boolean IO_EXCEPTION_TESTING_ON_WRITE = false;
static boolean IO_EXCEPTION_TESTING_ON_READ = false;
/* Test hook: propagate injected write failures as RunRecoveryException. */
static boolean THROW_RRE_FOR_UNIT_TESTS = false;
/* Name used for debug latches and tracing. */
private static final String DEBUG_NAME = FileManager.class
    .getName();
private static final boolean DEBUG = false;

/*
 * The number of writes that have been performed.
 *
 * public so that unit tests can diddle them.
 */
public static long WRITE_COUNT = 0;

/*
 * The write count value where we should stop or throw.
 */
public static long STOP_ON_WRITE_COUNT = Long.MAX_VALUE;

/*
 * If we're throwing, then throw on write #'s WRITE_COUNT through
 * WRITE_COUNT + N_BAD_WRITES - 1 (inclusive).
 */
public static long N_BAD_WRITES = Long.MAX_VALUE;

/*
 * If true, then throw an IOException on write #'s WRITE_COUNT through
 * WRITE_COUNT + N_BAD_WRITES - 1 (inclusive).
 */
public static boolean THROW_ON_WRITE = false;

public static final String JE_SUFFIX = ".jdb"; // regular log files
public static final String DEL_SUFFIX = ".del"; // cleaned files
public static final String BAD_SUFFIX = ".bad"; // corrupt files
private static final String LOCK_FILE = "je.lck";// lock file
static final String[] DEL_SUFFIXES = { DEL_SUFFIX };
static final String[] JE_SUFFIXES = { JE_SUFFIX };
private static final String[] JE_AND_DEL_SUFFIXES = { JE_SUFFIX,
    DEL_SUFFIX };

/* May be set to false to speed unit tests. */
private boolean syncAtFileEnd = true;

private EnvironmentImpl envImpl;
private long maxFileSize;
private File dbEnvHome;

/* True if .del files should be included in the list of log files. */
private boolean includeDeletedFiles = false;

/* File cache of open read-only FileHandles, guarded by fileCacheLatch. */
private FileCache fileCache;
private Latch fileCacheLatch;

/* The channel and lock for the je.lck file. */
private RandomAccessFile lockFile;
private FileChannel channel;
private FileLock envLock;
private FileLock exclLock;

/* True if all files should be opened readonly. */
private boolean readOnly;

/* Handles onto log position */
private long currentFileNum; // number of the current file
private long nextAvailableLsn; // nextLSN is the next one available
private long lastUsedLsn; // last LSN used in the current log file
private long prevOffset; // Offset to use for the previous pointer
private boolean forceNewFile; // Force new file on next write

/*
 * Saved versions of above. Save this in case a write causes an
 * IOException, we can back the log up to the last known good LSN.
 */
private long savedCurrentFileNum;
private long savedNextAvailableLsn; // nextLSN is the next one available
private long savedLastUsedLsn; // last LSN used in the current log file
private long savedPrevOffset; // Offset to use for the previous pointer
private boolean savedForceNewFile;

/* endOfLog is used for writes and fsyncs to the end of the log. */
private LogEndFileDescriptor endOfLog;

/* group commit sync */
private FSyncManager syncManager;

/*
 * When we bump the LSNs over to a new file, we must remember the last LSN
 * of the previous file so we can set the prevOffset field of the file
 * header appropriately. We have to save it in a map because there's a time
 * lag between when we know what the last LSN is and when we actually do
 * the file write, because LSN bumping is done before we get a write
 * buffer. This map is keyed by file num->last LSN.
 */
private Map perFileLastUsedLsn;

/* Whether to use NIO for file I/O. */
private boolean useNIO;

/*
 * If non-0, do NIO in chunks of this size.
 */
private long chunkedNIOSize = 0;

/*
 * Use O_DSYNC to open JE log files.
 */
private final boolean useODSYNC;
0189:
0190: /**
0191: * Set up the file cache and initialize the file manager to point to the
0192: * beginning of the log.
0193: *
0194: * @param configManager
0195: * @param dbEnvHome environment home directory
0196: */
0197: public FileManager(EnvironmentImpl envImpl, File dbEnvHome,
0198: boolean readOnly) throws DatabaseException {
0199:
0200: this .envImpl = envImpl;
0201: this .dbEnvHome = dbEnvHome;
0202: this .readOnly = readOnly;
0203:
0204: /* Read configurations. */
0205: DbConfigManager configManager = envImpl.getConfigManager();
0206: maxFileSize = configManager
0207: .getLong(EnvironmentParams.LOG_FILE_MAX);
0208:
0209: useNIO = configManager
0210: .getBoolean(EnvironmentParams.LOG_USE_NIO);
0211: chunkedNIOSize = configManager
0212: .getLong(EnvironmentParams.LOG_CHUNKED_NIO);
0213: useODSYNC = configManager
0214: .getBoolean(EnvironmentParams.LOG_USE_ODSYNC);
0215: boolean directNIO = configManager
0216: .getBoolean(EnvironmentParams.LOG_DIRECT_NIO);
0217:
0218: if (!useNIO && (chunkedNIOSize > 0 || directNIO)) {
0219: throw new IllegalArgumentException(
0220: EnvironmentParams.LOG_USE_NIO.getName()
0221: + " is false and therefore "
0222: + EnvironmentParams.LOG_DIRECT_NIO
0223: .getName()
0224: + " or "
0225: + EnvironmentParams.LOG_CHUNKED_NIO
0226: .getName() + " may not be used.");
0227: }
0228:
0229: if (!envImpl.isMemOnly()) {
0230: if (!dbEnvHome.exists()) {
0231: throw new LogException("Environment home " + dbEnvHome
0232: + " doesn't exist");
0233: }
0234: lockEnvironment(readOnly, false);
0235: }
0236:
0237: /* Cache of files. */
0238: fileCache = new FileCache(configManager);
0239: fileCacheLatch = LatchSupport.makeLatch(DEBUG_NAME
0240: + "_fileCache", envImpl);
0241:
0242: /* Start out as if no log existed. */
0243: currentFileNum = 0L;
0244: nextAvailableLsn = DbLsn.makeLsn(currentFileNum,
0245: firstLogEntryOffset());
0246: lastUsedLsn = DbLsn.NULL_LSN;
0247: perFileLastUsedLsn = new HashMap();
0248: prevOffset = 0L;
0249: endOfLog = new LogEndFileDescriptor();
0250: forceNewFile = false;
0251: saveLastPosition();
0252:
0253: String stopOnWriteCountProp = System
0254: .getProperty("je.debug.stopOnWriteCount");
0255: if (stopOnWriteCountProp != null) {
0256: STOP_ON_WRITE_COUNT = Long.parseLong(stopOnWriteCountProp);
0257: }
0258:
0259: String stopOnWriteActionProp = System
0260: .getProperty("je.debug.stopOnWriteAction");
0261: if (stopOnWriteActionProp != null) {
0262: if (stopOnWriteActionProp.compareToIgnoreCase("throw") == 0) {
0263: THROW_ON_WRITE = true;
0264: } else if (stopOnWriteActionProp
0265: .compareToIgnoreCase("stop") == 0) {
0266: THROW_ON_WRITE = false;
0267: } else {
0268: throw new DatabaseException(
0269: "unknown value for je.debugStopOnWriteAction: "
0270: + stopOnWriteActionProp);
0271: }
0272: }
0273:
0274: syncManager = new FSyncManager(envImpl);
0275: }
0276:
0277: /**
0278: * Set the file manager's "end of log".
0279: *
0280: * @param nextAvailableLsn LSN to be used for the next log entry
0281: * @param lastUsedLsn last LSN to have a valid entry, may be null
0282: * @param prevOffset value to use for the prevOffset of the next entry.
0283: * If the beginning of the file, this is 0.
0284: */
0285: public void setLastPosition(long nextAvailableLsn,
0286: long lastUsedLsn, long prevOffset) {
0287: this .lastUsedLsn = lastUsedLsn;
0288: perFileLastUsedLsn.put(new Long(DbLsn
0289: .getFileNumber(lastUsedLsn)), new Long(lastUsedLsn));
0290: this .nextAvailableLsn = nextAvailableLsn;
0291: currentFileNum = DbLsn.getFileNumber(this .nextAvailableLsn);
0292: this .prevOffset = prevOffset;
0293: saveLastPosition();
0294: }
0295:
0296: /*
0297: * Cause the current LSN state to be saved in case we fail after we have
0298: * bumped the LSN pointer but before we've successfully marshalled into the
0299: * log buffer.
0300: */
0301: void saveLastPosition() {
0302: savedNextAvailableLsn = nextAvailableLsn;
0303: savedLastUsedLsn = lastUsedLsn;
0304: savedPrevOffset = prevOffset;
0305: savedForceNewFile = forceNewFile;
0306: savedCurrentFileNum = currentFileNum;
0307: }
0308:
0309: void restoreLastPosition() {
0310: nextAvailableLsn = savedNextAvailableLsn;
0311: lastUsedLsn = savedLastUsedLsn;
0312: prevOffset = savedPrevOffset;
0313: forceNewFile = savedForceNewFile;
0314: currentFileNum = savedCurrentFileNum;
0315: }
0316:
0317: /**
0318: * May be used to disable sync at file end to speed unit tests.
0319: * Must only be used for unit testing, since log corruption may result.
0320: */
0321: public void setSyncAtFileEnd(boolean sync) {
0322: syncAtFileEnd = sync;
0323: }
0324:
0325: /*
0326: * File management
0327: */
0328:
0329: /**
0330: * public for cleaner.
0331: *
0332: * @return the number of the first file in this environment.
0333: */
0334: public Long getFirstFileNum() {
0335: return getFileNum(true);
0336: }
0337:
0338: public boolean getReadOnly() {
0339: return readOnly;
0340: }
0341:
0342: /**
0343: * @return the number of the last file in this environment.
0344: */
0345: public Long getLastFileNum() {
0346: return getFileNum(false);
0347: }
0348:
0349: /*
0350: * For unit tests.
0351: */
0352: public long getCurrentFileNum() {
0353: return currentFileNum;
0354: }
0355:
0356: public void setIncludeDeletedFiles(boolean includeDeletedFiles) {
0357: this .includeDeletedFiles = includeDeletedFiles;
0358: }
0359:
0360: /**
0361: * Get all JE file numbers.
0362: * @return an array of all JE file numbers.
0363: */
0364: public Long[] getAllFileNumbers() {
0365: /* Get all the names in sorted order. */
0366: String[] names = listFiles(JE_SUFFIXES);
0367: Long[] nums = new Long[names.length];
0368: for (int i = 0; i < nums.length; i += 1) {
0369: nums[i] = getNumFromName(names[i]);
0370: }
0371: return nums;
0372: }
0373:
0374: /**
0375: * Get the next file number before/after currentFileNum.
0376: * @param currentFileNum the file we're at right now. Note that
0377: * it may not exist, if it's been cleaned and renamed.
0378: * @param forward if true, we want the next larger file, if false
0379: * we want the previous file
0380: * @return null if there is no following file, or if filenum doesn't exist
0381: */
0382: public Long getFollowingFileNum(long currentFileNum, boolean forward) {
0383: /* Get all the names in sorted order. */
0384: String[] names = listFiles(JE_SUFFIXES);
0385:
0386: /* Search for the current file. */
0387: String searchName = getFileName(currentFileNum, JE_SUFFIX);
0388: int foundIdx = Arrays.binarySearch(names, searchName);
0389:
0390: boolean foundTarget = false;
0391: if (foundIdx >= 0) {
0392: if (forward) {
0393: foundIdx++;
0394: } else {
0395: foundIdx--;
0396: }
0397: } else {
0398:
0399: /*
0400: * currentFileNum not found (might have been cleaned). FoundIdx
0401: * will be (-insertionPoint - 1).
0402: */
0403: foundIdx = Math.abs(foundIdx + 1);
0404: if (!forward) {
0405: foundIdx--;
0406: }
0407: }
0408:
0409: /* The current fileNum is found, return the next or prev file. */
0410: if (forward && (foundIdx < names.length)) {
0411: foundTarget = true;
0412: } else if (!forward && (foundIdx > -1)) {
0413: foundTarget = true;
0414: }
0415:
0416: if (foundTarget) {
0417: return getNumFromName(names[foundIdx]);
0418: } else {
0419: return null;
0420: }
0421: }
0422:
0423: /**
0424: * @return true if there are any files at all.
0425: */
0426: public boolean filesExist() {
0427: String[] names = listFiles(JE_SUFFIXES);
0428: return (names.length != 0);
0429: }
0430:
0431: /**
0432: * Get the first or last file number in the set of je files.
0433: *
0434: * @param first if true, get the first file, else get the last file
0435: * @return the file number or null if no files exist
0436: */
0437: private Long getFileNum(boolean first) {
0438: String[] names = listFiles(JE_SUFFIXES);
0439: if (names.length == 0) {
0440: return null;
0441: } else {
0442: int index = 0;
0443: if (!first) {
0444: index = names.length - 1;
0445: }
0446: return getNumFromName(names[index]);
0447: }
0448: }
0449:
0450: /**
0451: * Get the file number from a file name.
0452: *
0453: * @param the file name
0454: * @return the file number
0455: */
0456: public Long getNumFromName(String fileName) {
0457: String fileNumber = fileName
0458: .substring(0, fileName.indexOf("."));
0459: return new Long(Long.parseLong(fileNumber, 16));
0460: }
0461:
0462: /**
0463: * Find je files. Return names sorted in ascending fashion.
0464: * @param suffix which type of file we're looking for
0465: * @return array of file names
0466: */
0467: public String[] listFiles(String[] suffixes) {
0468: String[] fileNames = dbEnvHome.list(new JEFileFilter(suffixes));
0469: if (fileNames != null) {
0470: Arrays.sort(fileNames);
0471: } else {
0472: fileNames = new String[0];
0473: }
0474: return fileNames;
0475: }
0476:
0477: /**
0478: * Find .jdb files which are >= the minimimum file number and
0479: * <= the maximum file number.
0480: * Return names sorted in ascending fashion.
0481: *
0482: * @return array of file names
0483: */
0484: public String[] listFiles(long minFileNumber, long maxFileNumber) {
0485:
0486: String[] fileNames = dbEnvHome.list(new JEFileFilter(
0487: JE_SUFFIXES, minFileNumber, maxFileNumber));
0488: Arrays.sort(fileNames);
0489: return fileNames;
0490: }
0491:
0492: /**
0493: * Find je files, flavor for unit test support.
0494: *
0495: * @param suffix which type of file we're looking for
0496: * @return array of file names
0497: */
0498: public static String[] listFiles(File envDirFile, String[] suffixes) {
0499: String[] fileNames = envDirFile
0500: .list(new JEFileFilter(suffixes));
0501: if (fileNames != null) {
0502: Arrays.sort(fileNames);
0503: } else {
0504: fileNames = new String[0];
0505: }
0506: return fileNames;
0507: }
0508:
0509: /**
0510: * @return the full file name and path for the nth je file.
0511: */
0512: String[] getFullFileNames(long fileNum) {
0513: if (includeDeletedFiles) {
0514: int nSuffixes = JE_AND_DEL_SUFFIXES.length;
0515: String[] ret = new String[nSuffixes];
0516: for (int i = 0; i < nSuffixes; i++) {
0517: ret[i] = getFullFileName(getFileName(fileNum,
0518: JE_AND_DEL_SUFFIXES[i]));
0519: }
0520: return ret;
0521: } else {
0522: return new String[] { getFullFileName(getFileName(fileNum,
0523: JE_SUFFIX)) };
0524: }
0525: }
0526:
0527: /**
0528: * @return the full file name and path for the given file number and
0529: * suffix.
0530: */
0531: public String getFullFileName(long fileNum, String suffix) {
0532: return getFullFileName(getFileName(fileNum, suffix));
0533: }
0534:
0535: /**
0536: * @return the full file name and path for this file name.
0537: */
0538: private String getFullFileName(String fileName) {
0539: return dbEnvHome + File.separator + fileName;
0540: }
0541:
0542: /**
0543: * @return the file name for the nth file.
0544: */
0545: public static String getFileName(long fileNum, String suffix) {
0546:
0547: /*
0548: * HexFormatter generates a 0 padded string starting with 0x. We want
0549: * the right most 8 digits, so start at 10.
0550: */
0551: return (HexFormatter.formatLong(fileNum).substring(10) + suffix);
0552: }
0553:
0554: /**
0555: * Rename this file to NNNNNNNN.suffix. If that file already exists, try
0556: * NNNNNNNN.suffix.1, etc. Used for deleting files or moving corrupt files
0557: * aside.
0558: *
0559: * @param fileNum the file we want to move
0560: * @param newSuffix the new file suffix
0561: */
0562: public void renameFile(long fileNum, String newSuffix)
0563: throws DatabaseException, IOException {
0564:
0565: int repeatNum = 0;
0566: boolean renamed = false;
0567: while (!renamed) {
0568: String generation = "";
0569: if (repeatNum > 0) {
0570: generation = "." + repeatNum;
0571: }
0572: String newName = getFullFileName(getFileName(fileNum,
0573: newSuffix)
0574: + generation);
0575: File targetFile = new File(newName);
0576: if (targetFile.exists()) {
0577: repeatNum++;
0578: } else {
0579: String oldFileName = getFullFileNames(fileNum)[0];
0580: clearFileCache(fileNum);
0581: File oldFile = new File(oldFileName);
0582: if (oldFile.renameTo(targetFile)) {
0583: renamed = true;
0584: } else {
0585: throw new LogException("Couldn't rename "
0586: + oldFileName + " to " + newName);
0587: }
0588: }
0589: }
0590: }
0591:
0592: /**
0593: * Delete log file NNNNNNNN.
0594: *
0595: * @param fileNum the file we want to move
0596: */
0597: public void deleteFile(long fileNum) throws DatabaseException,
0598: IOException {
0599:
0600: String fileName = getFullFileNames(fileNum)[0];
0601: clearFileCache(fileNum);
0602: File file = new File(fileName);
0603: boolean done = file.delete();
0604: if (!done) {
0605: throw new LogException("Couldn't delete " + file);
0606: }
0607: }
0608:
/**
 * Return a read only file handle that corresponds the this file number.
 * Retrieve it from the cache or open it anew and validate the file header.
 * This method takes a latch on this file, so that the file descriptor will
 * be held in the cache as long as it's in use. When the user is done with
 * the file, the latch must be released.
 *
 * @param fileNum which file
 * @return the file handle for the existing or newly created file, latched
 */
FileHandle getFileHandle(long fileNum) throws LogException,
    DatabaseException {

    /* Check the file cache for this file. */
    Long fileId = new Long(fileNum);
    FileHandle fileHandle = null;

    /**
     * Loop until we get an open FileHandle.
     */
    while (true) {

        /*
         * The file cache is intentionally not latched here so that it's
         * not a bottleneck in the fast path. We check that the file
         * handle that we get back is really still open after we latch it
         * down below.
         */
        fileHandle = fileCache.get(fileId);

        /* The file wasn't in the cache. */
        if (fileHandle == null) {
            fileCacheLatch.acquire();
            try {
                /* Check the file cache again under the latch. */
                fileHandle = fileCache.get(fileId);
                if (fileHandle == null) {

                    /* Open read-only and validate the file header. */
                    fileHandle = makeFileHandle(fileNum,
                        FileMode.READ_MODE);

                    /* Put it into the cache. */
                    fileCache.add(fileId, fileHandle);
                }
            } finally {
                fileCacheLatch.release();
            }
        }

        /* Get latch before returning */
        fileHandle.latch();

        /*
         * We may have obtained this file handle outside the file cache
         * latch, so we have to test that the handle is still valid. If
         * it's not, then loop back and try again.
         */
        if (fileHandle.getFile() == null) {
            /* Handle was closed under us; release and retry. */
            fileHandle.release();
        } else {
            break;
        }
    }

    return fileHandle;
}
0675:
0676: private FileMode getAppropriateReadWriteMode() {
0677: if (useODSYNC) {
0678: return FileMode.READWRITE_ODSYNC_MODE;
0679: } else {
0680: return FileMode.READWRITE_MODE;
0681: }
0682: }
0683:
/**
 * Open the file for the given file number in the given mode and wrap it
 * in a FileHandle. A new, empty file gets a header written (if writable);
 * an existing file has its header validated.
 *
 * @param fileNum which file to open
 * @param mode open mode; determines writability
 * @return a new, unlatched FileHandle
 * @throws DatabaseException wrapping any open/validation failure
 */
private FileHandle makeFileHandle(long fileNum, FileMode mode)
    throws DatabaseException {

    String[] fileNames = getFullFileNames(fileNum);
    RandomAccessFile newFile = null;
    String fileName = null;
    try {

        /*
         * Open the file. Note that we are going to try a few names to open
         * this file -- we'll try for N.jdb, and if that doesn't exist and
         * we're configured to look for all types, we'll look for N.del.
         */
        FileNotFoundException FNFE = null;
        for (int i = 0; i < fileNames.length; i++) {
            fileName = fileNames[i];
            try {
                newFile = new RandomAccessFile(fileName, mode
                    .getModeValue());
                break;
            } catch (FileNotFoundException e) {
                /* Save the first exception thrown. */
                if (FNFE == null) {
                    FNFE = e;
                }
            }
        }

        /*
         * If we didn't find the file or couldn't create it, rethrow the
         * exception.
         */
        if (newFile == null) {
            throw FNFE;
        }

        boolean oldHeaderVersion = false;

        if (newFile.length() == 0) {

            /*
             * If the file is empty, reinitialize it if we can. If not,
             * send the file handle back up; the calling code will deal
             * with the fact that there's nothing there.
             */
            if (mode.getIsWritable()) {
                /* An empty file, write a header. */
                long lastLsn = DbLsn
                    .longToLsn((Long) perFileLastUsedLsn
                        .remove(new Long(fileNum - 1)));
                long headerPrevOffset = 0;
                if (lastLsn != DbLsn.NULL_LSN) {
                    headerPrevOffset = DbLsn.getFileOffset(lastLsn);
                }
                FileHeader fileHeader = new FileHeader(fileNum,
                    headerPrevOffset);
                writeFileHeader(newFile, fileName, fileHeader,
                    fileNum);
            }
        } else {
            /* A non-empty file, check the header */
            oldHeaderVersion = readAndValidateFileHeader(newFile,
                fileName, fileNum);
        }
        return new FileHandle(newFile, fileName, envImpl,
            oldHeaderVersion);
    } catch (FileNotFoundException e) {
        throw new LogFileNotFoundException("Couldn't open file "
            + fileName + ": " + e.getMessage());
    } catch (DbChecksumException e) {

        /*
         * Let this exception go as a checksum exception, so it sets the
         * run recovery state correctly.
         */
        closeFileInErrorCase(newFile);
        throw new DbChecksumException(envImpl,
            "Couldn't open file " + fileName, e);
    } catch (Throwable t) {

        /*
         * Catch Throwable here (rather than exception) because in unit
         * test mode, we run assertions and they throw errors. We want to
         * clean up the file object in all cases.
         */
        closeFileInErrorCase(newFile);
        throw new DatabaseException("Couldn't open file "
            + fileName + ": " + t, t);
    }
}
0774:
0775: /**
0776: * Close this file and eat any exceptions. Used in catch clauses.
0777: */
0778: private void closeFileInErrorCase(RandomAccessFile file) {
0779: try {
0780: if (file != null) {
0781: file.close();
0782: }
0783: } catch (IOException e) {
0784:
0785: /*
0786: * Klockwork - ok
0787: * Couldn't close file, oh well.
0788: */
0789: }
0790: }
0791:
0792: /**
0793: * Read the given je log file and validate the header.
0794: *
0795: * @throws DatabaseException if the file header isn't valid
0796: *
0797: * @return whether the file header has an old version number.
0798: */
0799: private boolean readAndValidateFileHeader(RandomAccessFile file,
0800: String fileName, long fileNum) throws DatabaseException,
0801: IOException {
0802:
0803: /*
0804: * Read the file header from this file. It's always the first log
0805: * entry.
0806: */
0807: LogManager logManager = envImpl.getLogManager();
0808: LogEntry headerEntry = logManager.getLogEntry(DbLsn.makeLsn(
0809: fileNum, 0), file);
0810: FileHeader header = (FileHeader) headerEntry.getMainItem();
0811: return header.validate(fileName, fileNum);
0812: }
0813:
0814: /**
0815: * Write a proper file header to the given file.
0816: */
0817: private void writeFileHeader(RandomAccessFile file,
0818: String fileName, FileHeader header, long fileNum)
0819: throws DatabaseException {
0820:
0821: /*
0822: * Fail loudly if the environment is invalid. A RunRecoveryException
0823: * must have occurred.
0824: */
0825: envImpl.checkIfInvalid();
0826:
0827: /*
0828: * Fail silent if the environment is not open.
0829: */
0830: if (envImpl.mayNotWrite()) {
0831: return;
0832: }
0833:
0834: /* Write file header into this buffer in the usual log entry format. */
0835: LogEntry headerLogEntry = new SingleItemEntry(
0836: LogEntryType.LOG_FILE_HEADER, header);
0837: ByteBuffer headerBuf = envImpl.getLogManager().putIntoBuffer(
0838: headerLogEntry, 0); // prevLogEntryOffset
0839:
0840: /* Write the buffer into the channel. */
0841: int bytesWritten;
0842: try {
0843: if (RUNRECOVERY_EXCEPTION_TESTING) {
0844: generateRunRecoveryException(file, headerBuf, 0);
0845: }
0846: bytesWritten = writeToFile(file, headerBuf, 0);
0847:
0848: if (fileNum > savedCurrentFileNum) {
0849:
0850: /*
0851: * Writing the new file header succeeded without an IOE. This
0852: * can not be undone in the event of another IOE (Out Of Disk
0853: * Space) on the next write so update the saved LSN state with
0854: * the new info. Do not update the nextAvailableLsn with a
0855: * smaller (earlier) LSN in case there's already something in a
0856: * buffer that is after the new header. [#15754]
0857: */
0858: long lsnAfterHeader = DbLsn.makeLsn(fileNum,
0859: bytesWritten);
0860: if (DbLsn.compareTo(nextAvailableLsn, lsnAfterHeader) < 0) {
0861: nextAvailableLsn = lsnAfterHeader;
0862: }
0863:
0864: lastUsedLsn = DbLsn.makeLsn(fileNum, bytesWritten);
0865: prevOffset = bytesWritten;
0866: forceNewFile = false;
0867: currentFileNum = fileNum;
0868: saveLastPosition();
0869: }
0870: } catch (ClosedChannelException e) {
0871:
0872: /*
0873: * The channel should never be closed. It may be closed because
0874: * of an interrupt received by another thread. See SR [#10463]
0875: */
0876: throw new RunRecoveryException(envImpl,
0877: "Channel closed, may be due to thread interrupt", e);
0878: } catch (IOException e) {
0879: /* Possibly an out of disk exception. */
0880: throw new RunRecoveryException(envImpl,
0881: "IOException during write: " + e);
0882: }
0883:
0884: if (bytesWritten != headerLogEntry.getSize()
0885: + LogEntryHeader.MIN_HEADER_SIZE) {
0886: throw new LogException("File " + fileName
0887: + " was created with an incomplete header. Only "
0888: + bytesWritten + " bytes were written.");
0889: }
0890: }
0891:
0892: /**
0893: * @return the prevOffset field stored in the file header.
0894: */
0895: long getFileHeaderPrevOffset(long fileNum) throws IOException,
0896: DatabaseException {
0897:
0898: LogEntry headerEntry = envImpl.getLogManager().getLogEntry(
0899: DbLsn.makeLsn(fileNum, 0));
0900: FileHeader header = (FileHeader) headerEntry.getMainItem();
0901: return header.getLastEntryInPrevFileOffset();
0902: }
0903:
0904: /*
0905: * Support for writing new log entries
0906: */
0907:
0908: /**
0909: * @return the file offset of the last LSN that was used. For constructing
0910: * the headers of log entries. If the last LSN that was used was in a
0911: * previous file, or this is the very first LSN of the whole system, return
0912: * 0.
0913: */
0914: long getPrevEntryOffset() {
0915: return prevOffset;
0916: }
0917:
/**
 * Increase the current log position by "size" bytes. Move the prevOffset
 * pointer along.
 *
 * @param size is an unsigned int
 * @return true if we flipped to the next log file.
 */
boolean bumpLsn(long size) {

    /* Save copy of initial LSN state, so a failed write can roll back. */
    saveLastPosition();

    boolean flippedFiles = false;

    /* Flip files when forced, or when this entry would not fit. */
    if (forceNewFile
        || (DbLsn.getFileOffset(nextAvailableLsn) + size) > maxFileSize) {

        forceNewFile = false;

        /* Move to another file. */
        currentFileNum++;

        /* Remember the last used LSN of the previous file. */
        if (lastUsedLsn != DbLsn.NULL_LSN) {
            perFileLastUsedLsn.put(new Long(DbLsn
                .getFileNumber(lastUsedLsn)), new Long(
                lastUsedLsn));
        }
        /* First entry of a new file follows the file header. */
        prevOffset = 0;
        lastUsedLsn = DbLsn.makeLsn(currentFileNum,
            firstLogEntryOffset());
        flippedFiles = true;
    } else {
        /* Same file: prevOffset points at the previous entry, if any. */
        if (lastUsedLsn == DbLsn.NULL_LSN) {
            prevOffset = 0;
        } else {
            prevOffset = DbLsn.getFileOffset(lastUsedLsn);
        }
        lastUsedLsn = nextAvailableLsn;
    }
    /* The next entry starts "size" bytes past the one just allotted. */
    nextAvailableLsn = DbLsn.makeLsn(DbLsn
        .getFileNumber(lastUsedLsn), (DbLsn
        .getFileOffset(lastUsedLsn) + size));

    return flippedFiles;
}
0964:
/**
 * Write out a log buffer to the file.
 * @param fullBuffer buffer to write
 * @throws DatabaseException if the environment is invalid or the write
 * fails without a successful retry.
 */
void writeLogBuffer(LogBuffer fullBuffer) throws DatabaseException {

    /*
     * Fail loudly if the environment is invalid. A RunRecoveryException
     * must have occurred.
     */
    envImpl.checkIfInvalid();

    /*
     * Fail silent if the environment is not open.
     */
    if (envImpl.mayNotWrite()) {
        return;
    }

    /* Use the LSN to figure out what file to write this buffer to. */
    long firstLsn = fullBuffer.getFirstLsn();

    /*
     * Is there anything in this write buffer? We could have been called by
     * the environment shutdown, and nothing is actually in the buffer.
     */
    if (firstLsn != DbLsn.NULL_LSN) {

        RandomAccessFile file = endOfLog.getWritableFile(DbLsn
            .getFileNumber(firstLsn));
        ByteBuffer data = fullBuffer.getDataBuffer();

        try {

            /*
             * Check that we do not overwrite unless the file only contains
             * a header [#11915] [#12616].
             */
            assert fullBuffer.getRewriteAllowed()
                || (DbLsn.getFileOffset(firstLsn) >= file
                    .length() || file.length() == firstLogEntryOffset()) : "FileManager would overwrite non-empty file 0x"
                + Long.toHexString(DbLsn
                    .getFileNumber(firstLsn))
                + " lsnOffset=0x"
                + Long.toHexString(DbLsn
                    .getFileOffset(firstLsn))
                + " fileLength=0x"
                + Long.toHexString(file.length());

            /* Test hooks for simulating write failures. */
            if (IO_EXCEPTION_TESTING_ON_WRITE) {
                throw new IOException(
                    "generated for testing (write)");
            }
            if (RUNRECOVERY_EXCEPTION_TESTING) {
                generateRunRecoveryException(file, data, DbLsn
                    .getFileOffset(firstLsn));
            }
            writeToFile(file, data, DbLsn.getFileOffset(firstLsn));
        } catch (ClosedChannelException e) {

            /*
             * The file should never be closed. It may be closed because
             * of an interrupt received by another thread. See SR [#10463].
             */
            throw new RunRecoveryException(envImpl,
                "File closed, may be due to thread interrupt",
                e);
        } catch (IOException IOE) {

            if (!IO_EXCEPTION_TESTING_ON_WRITE
                || THROW_RRE_FOR_UNIT_TESTS) {
                throw new RunRecoveryException(envImpl,
                    "IOE during write", IOE);
            } else {

                /*
                 * Possibly an out of disk exception, but java.io will only
                 * tell us IOException with no indication of whether it's
                 * out of disk or something else.
                 *
                 * Since we can't tell what sectors were actually written
                 * to disk, we need to change any commit records that might
                 * have made it out to disk to abort records. If they made
                 * it to disk on the write, then rewriting should allow
                 * them to be rewritten. See [11271].
                 */
                abortCommittedTxns(data);
                try {
                    if (IO_EXCEPTION_TESTING_ON_WRITE) {
                        throw new IOException(
                            "generated for testing (write)");
                    }
                    /* Retry the write with the patched buffer. */
                    writeToFile(file, data, DbLsn
                        .getFileOffset(firstLsn));
                } catch (IOException IOE2) {
                    /* Retry also failed; caller may rewrite the buffer. */
                    fullBuffer.setRewriteAllowed();
                    throw new DatabaseException(IOE2);
                }
            }
        }

        assert EnvironmentImpl.maybeForceYield();
    }
}
1069:
1070: /**
1071: * Write a buffer to a file at a given offset, using NIO if so configured.
1072: */
1073: private int writeToFile(RandomAccessFile file, ByteBuffer data,
1074: long destOffset) throws IOException, DatabaseException {
1075:
1076: int totalBytesWritten = 0;
1077: if (useNIO) {
1078: FileChannel channel = file.getChannel();
1079:
1080: if (chunkedNIOSize > 0) {
1081:
1082: /*
1083: * We can't change the limit without impacting readers that
1084: * might find this buffer in the buffer pool. Duplicate the
1085: * buffer so we can set the limit independently.
1086: */
1087: ByteBuffer useData = data.duplicate();
1088:
1089: /*
1090: * Write small chunks of data by manipulating the position and
1091: * limit properties of the buffer, and submitting it for
1092: * writing repeatedly.
1093: *
1094: * For each chunk, the limit is set to the position +
1095: * chunkedNIOSize, capped by the original limit of the buffer.
1096: *
1097: * Preconditions: data to be written is betweek data.position()
1098: * and data.limit()
1099:
1100: * Postconditions: data.limit() has not changed,
1101: * data.position() == data.limit(), offset of the channel has
1102: * not been modified.
1103: */
1104: int originalLimit = useData.limit();
1105: useData.limit(useData.position());
1106: while (useData.limit() < originalLimit) {
1107: bumpWriteCount("nio write");
1108:
1109: useData.limit((int) (Math.min(useData.limit()
1110: + chunkedNIOSize, originalLimit)));
1111: int bytesWritten = channel.write(useData,
1112: destOffset);
1113: destOffset += bytesWritten;
1114: totalBytesWritten += bytesWritten;
1115: }
1116: } else {
1117:
1118: /*
1119: * Perform a single write using NIO.
1120: */
1121: totalBytesWritten = channel.write(data, destOffset);
1122: }
1123: } else {
1124:
1125: bumpWriteCount("write");
1126:
1127: /*
1128: * Perform a RandomAccessFile write and update the buffer position.
1129: * ByteBuffer.array() is safe to use since all non-direct
1130: * ByteBuffers have a backing array. Synchronization on the file
1131: * object is needed because two threads may call seek() on the same
1132: * file object.
1133: */
1134: synchronized (file) {
1135: assert data.hasArray();
1136: assert data.arrayOffset() == 0;
1137:
1138: int pos = data.position();
1139: int size = data.limit() - pos;
1140: file.seek(destOffset);
1141: file.write(data.array(), pos, size);
1142: data.position(pos + size);
1143: totalBytesWritten = size;
1144: }
1145: }
1146: return totalBytesWritten;
1147: }
1148:
1149: private void bumpWriteCount(final String debugMsg)
1150: throws IOException {
1151:
1152: if (DEBUG) {
1153: System.out
1154: .println("Write: " + WRITE_COUNT + " " + debugMsg);
1155: }
1156:
1157: if (++WRITE_COUNT >= STOP_ON_WRITE_COUNT
1158: && WRITE_COUNT < (STOP_ON_WRITE_COUNT + N_BAD_WRITES)) {
1159: if (THROW_ON_WRITE) {
1160: throw new IOException(
1161: "IOException generated for testing: "
1162: + WRITE_COUNT + " " + debugMsg);
1163: } else {
1164: Runtime.getRuntime().halt(0xff);
1165: }
1166: }
1167: }
1168:
1169: /**
1170: * Read a buffer from a file at a given offset, using NIO if so configured.
1171: */
1172: void readFromFile(RandomAccessFile file, ByteBuffer readBuffer,
1173: long offset) throws IOException {
1174:
1175: if (useNIO) {
1176: FileChannel channel = file.getChannel();
1177:
1178: if (chunkedNIOSize > 0) {
1179:
1180: /*
1181: * Read a chunk at a time to prevent large direct memory
1182: * allocations by NIO.
1183: */
1184: int readLength = readBuffer.limit();
1185: long currentPosition = offset;
1186: while (readBuffer.position() < readLength) {
1187: readBuffer.limit((int) (Math.min(readBuffer.limit()
1188: + chunkedNIOSize, readLength)));
1189: if (IO_EXCEPTION_TESTING_ON_READ) {
1190: throw new IOException(
1191: "generated for testing (read)");
1192: }
1193: int bytesRead = channel.read(readBuffer,
1194: currentPosition);
1195:
1196: if (bytesRead < 1)
1197: break;
1198:
1199: currentPosition += bytesRead;
1200: }
1201: } else {
1202:
1203: if (IO_EXCEPTION_TESTING_ON_READ) {
1204: throw new IOException(
1205: "generated for testing (read)");
1206: }
1207:
1208: /*
1209: * Perform a single read using NIO.
1210: */
1211: channel.read(readBuffer, offset);
1212: }
1213: } else {
1214:
1215: /*
1216: * Perform a RandomAccessFile read and update the buffer position.
1217: * ByteBuffer.array() is safe to use since all non-direct
1218: * ByteBuffers have a backing array. Synchronization on the file
1219: * object is needed because two threads may call seek() on the same
1220: * file object.
1221: */
1222: synchronized (file) {
1223: assert readBuffer.hasArray();
1224: assert readBuffer.arrayOffset() == 0;
1225:
1226: int pos = readBuffer.position();
1227: int size = readBuffer.limit() - pos;
1228: file.seek(offset);
1229: if (IO_EXCEPTION_TESTING_ON_READ) {
1230: throw new IOException(
1231: "generated for testing (read)");
1232: }
1233: int bytesRead = file
1234: .read(readBuffer.array(), pos, size);
1235: if (bytesRead > 0) {
1236: readBuffer.position(pos + bytesRead);
1237: }
1238: }
1239: }
1240: }
1241:
1242: /*
1243: * Iterate through a buffer looking for commit records. Change all commit
1244: * records to abort records.
1245: */
1246: private void abortCommittedTxns(ByteBuffer data)
1247: throws DatabaseException {
1248:
1249: final byte commitType = LogEntryType.LOG_TXN_COMMIT
1250: .getTypeNum();
1251: final byte abortType = LogEntryType.LOG_TXN_ABORT.getTypeNum();
1252: data.position(0);
1253:
1254: while (data.remaining() > 0) {
1255: int recStartPos = data.position();
1256: LogEntryHeader header = new LogEntryHeader(envImpl, data,
1257: false); // anticipateChecksumErrors
1258:
1259: if (header.getType() == commitType) {
1260: /* Change the log entry type, and recalculate the checksum. */
1261: header.convertCommitToAbort(data);
1262: }
1263:
1264: data.position(recStartPos + header.getSize()
1265: + header.getItemSize());
1266: }
1267: data.position(0);
1268: }
1269:
1270: /**
1271: * FSync the end of the log.
1272: */
1273: void syncLogEnd() throws DatabaseException {
1274:
1275: try {
1276: endOfLog.force();
1277: } catch (IOException e) {
1278: throw new RunRecoveryException(envImpl,
1279: "IOException during fsync", e);
1280: }
1281: }
1282:
1283: /**
1284: * Sync the end of the log, close off this log file. Should only be called
1285: * under the log write latch.
1286: */
1287: void syncLogEndAndFinishFile() throws DatabaseException,
1288: IOException {
1289:
1290: if (syncAtFileEnd) {
1291: syncLogEnd();
1292: }
1293: endOfLog.close();
1294: }
1295:
1296: /**
1297: * Flush a file using the group sync mechanism, trying to amortize off
1298: * other syncs.
1299: */
1300: void groupSync() throws DatabaseException {
1301:
1302: syncManager.fsync();
1303: }
1304:
1305: /**
1306: * Close all file handles and empty the cache.
1307: */
1308: public void clear() throws IOException, DatabaseException {
1309:
1310: fileCacheLatch.acquire();
1311: try {
1312: fileCache.clear();
1313: } finally {
1314: fileCacheLatch.release();
1315: }
1316:
1317: endOfLog.close();
1318: }
1319:
1320: /**
1321: * Clear the file lock.
1322: */
1323: public void close() throws IOException, DatabaseException {
1324:
1325: if (envLock != null) {
1326: envLock.release();
1327: }
1328:
1329: if (exclLock != null) {
1330: exclLock.release();
1331: }
1332:
1333: if (channel != null) {
1334: channel.close();
1335: }
1336:
1337: if (lockFile != null) {
1338: lockFile.close();
1339: lockFile = null;
1340: }
1341: }
1342:
1343: /**
1344: * Lock the environment. Return true if the lock was acquired. If
1345: * exclusive is false, then this implements a single writer, multiple
1346: * reader lock. If exclusive is true, then implement an exclusive lock.
1347: *
1348: * There is a lock file and there are two regions of the lock file: byte 0,
1349: * and byte 1. Byte 0 is the exclusive writer process area of the lock
1350: * file. If an environment is opened for write, then it attempts to take
1351: * an exclusive write lock on byte 0. Byte 1 is the shared reader process
1352: * area of the lock file. If an environment is opened for read-only, then
1353: * it attempts to take a shared lock on byte 1. This is how we implement
1354: * single writer, multi reader semantics.
1355: *
1356: * The cleaner, each time it is invoked, attempts to take an exclusive lock
1357: * on byte 1. The owning process already either has an exclusive lock on
1358: * byte 0, or a shared lock on byte 1. This will necessarily conflict with
1359: * any shared locks on byte 1, even if it's in the same process and there
1360: * are no other holders of that shared lock. So if there is only one
1361: * read-only process, it will have byte 1 for shared access, and the
1362: * cleaner can not run in it because it will attempt to get an exclusive
1363: * lock on byte 1 (which is already locked for shared access by itself).
1364: * If a write process comes along and tries to run the cleaner, it will
1365: * attempt to get an exclusive lock on byte 1. If there are no other
1366: * reader processes (with shared locks on byte 1), and no other writers
1367: * (which are running cleaners on with exclusive locks on byte 1), then the
1368: * cleaner will run.
1369: */
1370: public boolean lockEnvironment(boolean readOnly, boolean exclusive)
1371: throws DatabaseException {
1372:
1373: try {
1374: if (checkEnvHomePermissions(readOnly)) {
1375: return true;
1376: }
1377:
1378: if (lockFile == null) {
1379: lockFile = new RandomAccessFile(new File(dbEnvHome,
1380: LOCK_FILE), FileMode.READWRITE_MODE
1381: .getModeValue());
1382: }
1383:
1384: channel = lockFile.getChannel();
1385:
1386: boolean throwIt = false;
1387: try {
1388: if (exclusive) {
1389:
1390: /*
1391: * To lock exclusive, must have exclusive on
1392: * shared reader area (byte 1).
1393: */
1394: exclLock = channel.tryLock(1, 1, false);
1395: if (exclLock == null) {
1396: return false;
1397: }
1398: return true;
1399: } else {
1400: if (readOnly) {
1401: envLock = channel.tryLock(1, 1, true);
1402: } else {
1403: envLock = channel.tryLock(0, 1, false);
1404: }
1405: if (envLock == null) {
1406: throwIt = true;
1407: }
1408: }
1409: } catch (OverlappingFileLockException e) {
1410: throwIt = true;
1411: }
1412: if (throwIt) {
1413: throw new LogException("A " + LOCK_FILE
1414: + " file exists in "
1415: + dbEnvHome.getAbsolutePath()
1416: + " The environment can not be locked for "
1417: + (readOnly ? "shared" : "single writer")
1418: + " access.");
1419: }
1420: } catch (IOException IOE) {
1421: throw new LogException(IOE.toString());
1422: }
1423: return true;
1424: }
1425:
1426: public void releaseExclusiveLock() throws DatabaseException {
1427:
1428: try {
1429: if (exclLock != null) {
1430: exclLock.release();
1431: }
1432: } catch (IOException IOE) {
1433: throw new DatabaseException(IOE);
1434: }
1435: }
1436:
1437: /**
1438: * Ensure that if the environment home dir is on readonly media or in a
1439: * readonly directory that the environment has been opened for readonly
1440: * access.
1441: *
1442: * @return true if the environment home dir is readonly.
1443: */
1444: public boolean checkEnvHomePermissions(boolean readOnly)
1445: throws DatabaseException {
1446:
1447: boolean envDirIsReadOnly = !dbEnvHome.canWrite();
1448: if (envDirIsReadOnly && !readOnly) {
1449:
1450: /*
1451: * Use the absolute path in the exception message, to
1452: * make a mis-specified relative path problem more obvious.
1453: */
1454: throw new DatabaseException("The Environment directory "
1455: + dbEnvHome.getAbsolutePath()
1456: + " is not writable, but the "
1457: + "Environment was opened for read-write access.");
1458: }
1459:
1460: return envDirIsReadOnly;
1461: }
1462:
1463: /**
1464: * Truncate a log at this position. Used by recovery to a timestamp
1465: * utilities and by recovery to set the end-of-log position.
1466: *
1467: * <p>This method forces a new log file to be written next, if the last
1468: * file (the file truncated to) has an old version in its header. This
1469: * ensures that when the log is opened by an old version of JE, a version
1470: * incompatibility will be detected. [#11243]</p>
1471: */
1472: public void truncateLog(long fileNum, long offset)
1473: throws IOException, DatabaseException {
1474:
1475: FileHandle handle = makeFileHandle(fileNum,
1476: getAppropriateReadWriteMode());
1477: RandomAccessFile file = handle.getFile();
1478:
1479: try {
1480: file.getChannel().truncate(offset);
1481: } finally {
1482: file.close();
1483: }
1484:
1485: if (handle.isOldHeaderVersion()) {
1486: forceNewFile = true;
1487: }
1488: }
1489:
1490: /**
1491: * Set the flag that causes a new file to be written before the next write.
1492: */
1493: void forceNewLogFile() {
1494: forceNewFile = true;
1495: }
1496:
1497: /**
1498: * Return the offset of the first log entry after the file header.
1499: */
1500:
1501: /**
1502: * @return the size in bytes of the file header log entry.
1503: */
1504: public static int firstLogEntryOffset() {
1505: return FileHeader.entrySize() + LogEntryHeader.MIN_HEADER_SIZE;
1506: }
1507:
1508: /**
1509: * Return the next available LSN in the log. Note that this is
1510: * unsynchronized, so is only valid as an approximation of log size.
1511: */
1512: public long getNextLsn() {
1513: return nextAvailableLsn;
1514: }
1515:
1516: /**
1517: * Return the last allocated LSN in the log. Note that this is
1518: * unsynchronized, so if it is called outside the log write latch it is
1519: * only valid as an approximation of log size.
1520: */
1521: public long getLastUsedLsn() {
1522: return lastUsedLsn;
1523: }
1524:
1525: /*
1526: * fsync stats.
1527: */
1528: public long getNFSyncs() {
1529: return syncManager.getNFSyncs();
1530: }
1531:
1532: public long getNFSyncRequests() {
1533: return syncManager.getNFSyncRequests();
1534: }
1535:
1536: public long getNFSyncTimeouts() {
1537: return syncManager.getNTimeouts();
1538: }
1539:
/** Load fsync statistics into the given stats object. */
void loadStats(StatsConfig config, EnvironmentStats stats)
throws DatabaseException {

syncManager.loadStats(config, stats);
}
1545:
1546: /*
1547: * Unit test support
1548: */
1549:
1550: /*
1551: * @return ids of files in cache
1552: */
1553: Set getCacheKeys() {
1554: return fileCache.getCacheKeys();
1555: }
1556:
1557: /**
1558: * Clear a file out of the file cache regardless of mode type.
1559: */
1560: private void clearFileCache(long fileNum) throws IOException,
1561: DatabaseException {
1562:
1563: fileCacheLatch.acquire();
1564: try {
1565: fileCache.remove(fileNum);
1566: } finally {
1567: fileCacheLatch.release();
1568: }
1569: }
1570:
1571: /*
1572: * The file cache keeps N RandomAccessFile objects cached for file
1573: * access. The cache consists of two parts: a Hashtable that doesn't
1574: * require extra synchronization, for the most common access, and a linked
1575: * list of files to support cache administration. Looking up a file from
1576: * the hash table doesn't require extra latching, but adding or deleting a
1577: * file does.
1578: */
1579: private static class FileCache {
1580: private Map fileMap; // Long->file
1581: private LinkedList fileList; // list of file numbers
1582: private int fileCacheSize;
1583:
1584: FileCache(DbConfigManager configManager)
1585: throws DatabaseException {
1586:
1587: /*
1588: * A fileMap maps the file number to FileHandles (RandomAccessFile,
1589: * latch). The fileList is a list of Longs to determine which files
1590: * to eject out of the file cache if it's too small.
1591: */
1592: fileMap = new Hashtable();
1593: fileList = new LinkedList();
1594: fileCacheSize = configManager
1595: .getInt(EnvironmentParams.LOG_FILE_CACHE_SIZE);
1596: }
1597:
1598: private FileHandle get(Long fileId) {
1599: return (FileHandle) fileMap.get(fileId);
1600: }
1601:
1602: private void add(Long fileId, FileHandle fileHandle)
1603: throws DatabaseException {
1604:
1605: /*
1606: * Does the cache have any room or do we have to evict? Hunt down
1607: * the file list for an unused file. Note that the file cache might
1608: * actually grow past the prescribed size if there is nothing
1609: * evictable. Should we try to shrink the file cache? Presently if
1610: * it grows, it doesn't shrink.
1611: */
1612: if (fileList.size() >= fileCacheSize) {
1613: Iterator iter = fileList.iterator();
1614: while (iter.hasNext()) {
1615: Long evictId = (Long) iter.next();
1616: FileHandle evictTarget = (FileHandle) fileMap
1617: .get(evictId);
1618:
1619: /*
1620: * Try to latch. If latchNoWait returns false, then another
1621: * thread owns this latch. Note that a thread that's trying
1622: * to get a new file handle should never already own the
1623: * latch on another file handle, because these latches are
1624: * meant to be short lived and only held over the i/o out
1625: * of the file.
1626: */
1627: if (evictTarget.latchNoWait()) {
1628: try {
1629: fileMap.remove(evictId);
1630: iter.remove();
1631: evictTarget.close();
1632: } catch (IOException e) {
1633: throw new DatabaseException(e);
1634: } finally {
1635: evictTarget.release();
1636: }
1637: break;
1638: }
1639: }
1640: }
1641:
1642: /*
1643: * We've done our best to evict. Add the file the the cache now
1644: * whether or not we did evict.
1645: */
1646: fileList.add(fileId);
1647: fileMap.put(fileId, fileHandle);
1648: }
1649:
1650: /**
1651: * Take any file handles corresponding to this file name out of the
1652: * cache. A file handle could be there twice, in rd only and in r/w
1653: * mode.
1654: */
1655: private void remove(long fileNum) throws IOException,
1656: DatabaseException {
1657:
1658: Iterator iter = fileList.iterator();
1659: while (iter.hasNext()) {
1660: Long evictId = (Long) iter.next();
1661: if (evictId.longValue() == fileNum) {
1662: FileHandle evictTarget = (FileHandle) fileMap
1663: .get(evictId);
1664: try {
1665: evictTarget.latch();
1666: fileMap.remove(evictId);
1667: iter.remove();
1668: evictTarget.close();
1669: } finally {
1670: evictTarget.release();
1671: }
1672: }
1673: }
1674: }
1675:
1676: private void clear() throws IOException, DatabaseException {
1677:
1678: Iterator iter = fileMap.values().iterator();
1679: while (iter.hasNext()) {
1680: FileHandle fileHandle = (FileHandle) iter.next();
1681: try {
1682: fileHandle.latch();
1683: fileHandle.close();
1684: iter.remove();
1685: } finally {
1686: fileHandle.release();
1687: }
1688: }
1689: fileMap.clear();
1690: fileList.clear();
1691: }
1692:
1693: private Set getCacheKeys() {
1694: return fileMap.keySet();
1695: }
1696: }
1697:
1698: /**
1699: * The LogEndFileDescriptor is used to write and fsync the end of the log.
1700: * Because the JE log is append only, there is only one logical R/W file
1701: * descriptor for the whole environment. This class actually implements two
1702: * RandomAccessFile instances, one for writing and one for fsyncing, so the
1703: * two types of operations don't block each other.
1704: *
1705: * The write file descriptor is considered the master. Manipulation of
1706: * this class is done under the log write latch. Here's an explanation of
1707: * why the log write latch is sufficient to safeguard all operations.
1708: *
1709: * There are two types of callers who may use this file descriptor: the
1710: * thread that is currently writing to the end of the log and any threads
1711: * that are fsyncing on behalf of the FSyncManager.
1712: *
1713: * The writing thread appends data to the file and fsyncs the file when we
1714: * flip over to a new log file. The file is only instantiated at the point
1715: * that it must do so -- which is either when the first fsync is required
1716: * by JE or when the log file is full and we flip files. Therefore, the
1717: * writing thread has two actions that change this descriptor -- we
1718: * initialize the file descriptor for the given log file at the first write
1719: * to the file, and we close the file descriptor when the log file is full.
1720: * Therefore is a period when there is no log descriptor -- when we have
1721: * not yet written a log buffer into a given log file.
1722: *
1723: * The fsyncing threads ask for the log end file descriptor asynchronously,
1724: * but will never modify it. These threads may arrive at the point when
1725: * the file descriptor is null, and therefore skip their fysnc, but that is
1726: * fine because it means a writing thread already flipped that target file
1727: * and has moved on to the next file.
1728: *
1729: * Time Activity
1730: * 10 thread 1 writes log entry A into file 0x0, issues fsync
1731: * outside of log write latch, yields the processor
1732: * 20 thread 2 writes log entry B, piggybacks off thread 1
1733: * 30 thread 3 writes log entry C, but no room left in that file,
1734: * so it flips the log, and fsyncs file 0x0, all under the log
1735: * write latch. It nulls out endOfLogRWFile, moves onto file
1736: * 0x1, but doesn't create the file yet.
1737: * 40 thread 1 finally comes along, but endOfLogRWFile is null--
1738: * no need to fsync in that case, 0x0 got fsynced.
1739: */
1740: class LogEndFileDescriptor {
1741: private RandomAccessFile endOfLogRWFile = null;
1742: private RandomAccessFile endOfLogSyncFile = null;
1743: private Object fsyncFileSynchronizer = new Object();
1744:
1745: /**
1746: * getWritableFile must be called under the log write latch.
1747: */
1748: RandomAccessFile getWritableFile(long fileNumber)
1749: throws RunRecoveryException {
1750:
1751: try {
1752:
1753: if (endOfLogRWFile == null) {
1754:
1755: /*
1756: * We need to make a file descriptor for the end of the
1757: * log. This is guaranteed to be called under the log
1758: * write latch.
1759: */
1760: endOfLogRWFile = makeFileHandle(fileNumber,
1761: getAppropriateReadWriteMode()).getFile();
1762: synchronized (fsyncFileSynchronizer) {
1763: endOfLogSyncFile = makeFileHandle(fileNumber,
1764: getAppropriateReadWriteMode())
1765: .getFile();
1766: }
1767: }
1768:
1769: return endOfLogRWFile;
1770: } catch (Exception e) {
1771:
1772: /*
1773: * If we can't get a write channel, we need to go into
1774: * RunRecovery state.
1775: */
1776: throw new RunRecoveryException(envImpl, e);
1777: }
1778: }
1779:
1780: /**
1781: * FSync the log file that makes up the end of the log.
1782: */
1783: void force() throws DatabaseException, IOException {
1784:
1785: /*
1786: * Get a local copy of the end of the log file descriptor, it could
1787: * change. No need to latch, no harm done if we get an old file
1788: * descriptor, because we forcibly fsync under the log write latch
1789: * when we switch files.
1790: *
1791: * If there is no current end file descriptor, we know that the log
1792: * file has flipped to a new file since the fsync was issued.
1793: */
1794: synchronized (fsyncFileSynchronizer) {
1795: RandomAccessFile file = endOfLogSyncFile;
1796: if (file != null) {
1797: bumpWriteCount("fsync");
1798: FileChannel channel = file.getChannel();
1799: try {
1800: channel.force(false);
1801: } catch (ClosedChannelException e) {
1802:
1803: /*
1804: * The channel should never be closed. It may be closed
1805: * because of an interrupt received by another
1806: * thread. See SR [#10463]
1807: */
1808: throw new RunRecoveryException(
1809: envImpl,
1810: "Channel closed, may be due to thread interrupt",
1811: e);
1812: }
1813:
1814: assert EnvironmentImpl.maybeForceYield();
1815: }
1816: }
1817: }
1818:
1819: /**
1820: * Close the end of the log file descriptor. Use atomic assignment to
1821: * ensure that we won't force and close on the same descriptor.
1822: */
1823: void close() throws IOException {
1824:
1825: IOException firstException = null;
1826: if (endOfLogRWFile != null) {
1827: RandomAccessFile file = endOfLogRWFile;
1828:
1829: /*
1830: * Null out so that other threads know endOfLogRWFile is no
1831: * longer available.
1832: */
1833: endOfLogRWFile = null;
1834: try {
1835: file.close();
1836: } catch (IOException e) {
1837: /* Save this exception, so we can try the second close. */
1838: firstException = e;
1839: }
1840: }
1841: synchronized (fsyncFileSynchronizer) {
1842: if (endOfLogSyncFile != null) {
1843: RandomAccessFile file = endOfLogSyncFile;
1844:
1845: /*
1846: * Null out so that other threads know endOfLogSyncFile is
1847: * no longer available.
1848: */
1849: endOfLogSyncFile = null;
1850: file.close();
1851: }
1852:
1853: if (firstException != null) {
1854: throw firstException;
1855: }
1856: }
1857: }
1858: }
1859:
1860: /*
1861: * Generate IOExceptions for testing.
1862: */
1863:
1864: /* Testing switch. */
1865: static boolean RUNRECOVERY_EXCEPTION_TESTING = false;
1866: /* Max write counter value. */
1867: private static final int RUNRECOVERY_EXCEPTION_MAX = 100;
1868: /* Current write counter value. */
1869: private int runRecoveryExceptionCounter = 0;
1870: /* Whether an exception has been thrown. */
1871: private boolean runRecoveryExceptionThrown = false;
1872: /* Random number generator. */
1873: private Random runRecoveryExceptionRandom = null;
1874:
1875: private void generateRunRecoveryException(RandomAccessFile file,
1876: ByteBuffer data, long destOffset) throws DatabaseException,
1877: IOException {
1878:
1879: if (runRecoveryExceptionThrown) {
1880: try {
1881: throw new Exception("Write after RunRecoveryException");
1882: } catch (Exception e) {
1883: e.printStackTrace();
1884: }
1885: }
1886: runRecoveryExceptionCounter += 1;
1887: if (runRecoveryExceptionCounter >= RUNRECOVERY_EXCEPTION_MAX) {
1888: runRecoveryExceptionCounter = 0;
1889: }
1890: if (runRecoveryExceptionRandom == null) {
1891: runRecoveryExceptionRandom = new Random(System
1892: .currentTimeMillis());
1893: }
1894: if (runRecoveryExceptionCounter == runRecoveryExceptionRandom
1895: .nextInt(RUNRECOVERY_EXCEPTION_MAX)) {
1896: int len = runRecoveryExceptionRandom.nextInt(data
1897: .remaining());
1898: if (len > 0) {
1899: byte[] a = new byte[len];
1900: data.get(a, 0, len);
1901: ByteBuffer buf = ByteBuffer.wrap(a);
1902: writeToFile(file, buf, destOffset);
1903: }
1904: runRecoveryExceptionThrown = true;
1905: throw new RunRecoveryException(envImpl,
1906: "Randomly generated for testing");
1907: }
1908: }
1909: }
|