Source Code Cross Referenced for LogToFile.java in Database DBMS » db-derby-10.2 » org.apache.derby.impl.store.raw.log



0001:        /*
0002:
0003:           Derby - Class org.apache.derby.impl.store.raw.log.LogToFile
0004:
0005:           Licensed to the Apache Software Foundation (ASF) under one or more
0006:           contributor license agreements.  See the NOTICE file distributed with
0007:           this work for additional information regarding copyright ownership.
0008:           The ASF licenses this file to you under the Apache License, Version 2.0
0009:           (the "License"); you may not use this file except in compliance with
0010:           the License.  You may obtain a copy of the License at
0011:
0012:              http://www.apache.org/licenses/LICENSE-2.0
0013:
0014:           Unless required by applicable law or agreed to in writing, software
0015:           distributed under the License is distributed on an "AS IS" BASIS,
0016:           WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
0017:           See the License for the specific language governing permissions and
0018:           limitations under the License.
0019:
0020:         */
0021:
0022:        package org.apache.derby.impl.store.raw.log;
0023:
0024:        import org.apache.derby.iapi.services.diag.Performance;
0025:
0026:        import org.apache.derby.impl.store.raw.log.CheckpointOperation;
0027:        import org.apache.derby.impl.store.raw.log.LogCounter;
0028:        import org.apache.derby.impl.store.raw.log.LogRecord;
0029:        import org.apache.derby.impl.store.raw.log.StreamLogScan;
0030:
0031:        // need this to print nested exception that corrupts the database
0032:        import org.apache.derby.iapi.services.context.ErrorStringBuilder;
0033:
0034:        import org.apache.derby.iapi.services.info.ProductGenusNames;
0035:        import org.apache.derby.iapi.services.info.ProductVersionHolder;
0036:
0037:        import org.apache.derby.iapi.reference.MessageId;
0038:        import org.apache.derby.iapi.reference.Property;
0039:        import org.apache.derby.iapi.reference.SQLState;
0040:
0041:        import org.apache.derby.iapi.services.daemon.DaemonService;
0042:        import org.apache.derby.iapi.services.daemon.Serviceable;
0043:        import org.apache.derby.iapi.services.context.ContextManager;
0044:        import org.apache.derby.iapi.services.context.ContextService;
0045:        import org.apache.derby.iapi.services.context.ShutdownException;
0046:        import org.apache.derby.iapi.services.monitor.Monitor;
0047:        import org.apache.derby.iapi.services.monitor.ModuleControl;
0048:        import org.apache.derby.iapi.services.monitor.ModuleSupportable;
0049:        import org.apache.derby.iapi.services.monitor.PersistentService;
0050:        import org.apache.derby.iapi.services.sanity.SanityManager;
0051:        import org.apache.derby.iapi.services.io.Formatable;
0052:        import org.apache.derby.iapi.services.io.TypedFormat;
0053:        import org.apache.derby.iapi.services.io.FormatIdUtil;
0054:        import org.apache.derby.iapi.services.io.StoredFormatIds;
0055:        import org.apache.derby.iapi.services.stream.HeaderPrintWriter;
0056:        import org.apache.derby.iapi.services.stream.PrintWriterGetHeader;
0057:        import org.apache.derby.iapi.services.stream.InfoStreams;
0058:        import org.apache.derby.iapi.error.StandardException;
0059:        import org.apache.derby.iapi.services.i18n.MessageService;
0060:        import org.apache.derby.iapi.store.access.AccessFactory;
0061:        import org.apache.derby.iapi.store.access.AccessFactoryGlobals;
0062:        import org.apache.derby.iapi.store.access.TransactionController;
0063:        import org.apache.derby.iapi.store.raw.Loggable;
0064:        import org.apache.derby.iapi.store.raw.RawStoreFactory;
0065:        import org.apache.derby.iapi.store.raw.ScanHandle;
0066:        import org.apache.derby.iapi.store.raw.log.LogFactory;
0067:        import org.apache.derby.iapi.store.raw.log.Logger;
0068:        import org.apache.derby.iapi.store.raw.log.LogInstant;
0069:        import org.apache.derby.iapi.store.raw.log.LogScan;
0070:        import org.apache.derby.iapi.store.raw.Transaction;
0071:        import org.apache.derby.iapi.store.raw.xact.RawTransaction;
0072:        import org.apache.derby.iapi.store.raw.xact.TransactionFactory;
0073:        import org.apache.derby.iapi.store.raw.data.DataFactory;
0074:        import org.apache.derby.iapi.services.property.PersistentSet;
0075:
0076:        import org.apache.derby.iapi.store.access.DatabaseInstant;
0077:        import org.apache.derby.catalog.UUID;
0078:        import org.apache.derby.iapi.services.uuid.UUIDFactory;
0079:        import org.apache.derby.iapi.services.property.PropertyUtil;
0080:        import org.apache.derby.iapi.reference.Attribute;
0081:        import org.apache.derby.iapi.services.io.FileUtil;
0082:        import org.apache.derby.iapi.util.ReuseFactory;
0083:
0084:        import org.apache.derby.io.StorageFactory;
0085:        import org.apache.derby.io.WritableStorageFactory;
0086:        import org.apache.derby.io.StorageFile;
0087:        import org.apache.derby.io.StorageRandomAccessFile;
0088:
0089:        import java.io.File; // Plain files are used for backups
0090:        import java.io.IOException;
0091:        import java.io.SyncFailedException;
0092:        import java.io.ByteArrayOutputStream;
0093:        import java.io.DataOutputStream;
0094:        import java.io.ByteArrayInputStream;
0095:        import java.io.DataInputStream;
0096:        import java.io.FileNotFoundException;
0097:
0098:        import java.net.MalformedURLException;
0099:        import java.net.URL;
0100:
0101:        import java.util.Properties;
0102:        import java.util.Vector;
0103:        import java.util.zip.CRC32;
0104:
0105:        /**
0106:
0107:         This is an implementation of the log using a non-circular file system file.
0108:         No support for incremental log backup or media recovery.
0109:         Only crash recovery is supported.  
0110:         <P>
0111:         The 'log' is a stream of log records.  The 'log' is implemented as
0112:         a series of numbered log files.  These numbered log files are logically
0113:         continuous so a transaction can have log records that span multiple log files.
0114:         A single log record cannot span more than one log file.  The log file number
0115:         is monotonically increasing.
0116:         <P>
0117:         The log belongs to a log factory of a RawStore.  In the current implementation,
0118:         each RawStore only has one log factory, so each RawStore only has one log
0119:         (which is composed of multiple log files).
0120:         At any given time, a log factory writes new log records to only one log file;
0121:         this log file is called the 'current log file'.
0122:         <P>
0123:         A log file is named log<em>logNumber</em>.dat
0124:         <P>
0125:         Every time a checkpoint is taken, a new log file is created and all subsequent
0126:         log records will go to the new log file.  After a checkpoint is taken, old
0127:         and useless log files will be deleted.
0128:         <P>
0129:         RawStore exposes a checkpoint method which clients can call, or a checkpoint is
0130:         taken automatically by the RawStore when
0131:         <OL>
0132:         <LI> the log written since the last checkpoint grows beyond a certain size (the configurable checkpoint interval, default 10M bytes)
0133:         <LI> RawStore is shutdown and a checkpoint hasn't been done "for a while"
0134:         <LI> RawStore is recovered and a checkpoint hasn't been done "for a while"
0135:         </OL>
0136:         <P>
0137:         This LogFactory is responsible for the formats of 2 kinds of file: the log
0138:         file and the log control file.  And it is responsible for the format of the
0139:         log record wrapper.
0140:         <P> <PRE>
0141:
0142:         Format of log control file 
0143:
0144:         @format_id	FILE_STREAM_LOG_FILE
0145:         @purpose	The log control file contains information about which log files
0146:         are present and where the last checkpoint log record is located.
0147:         @upgrade	
0148:         @disk_layout
0149:         (pre-v15)
0150:         int format id
0151:         int log file version
0152:         long the log instant (LogCounter) of the last completed checkpoint
0153:         (v15 onward)
0154:         int format id
0155:         int obsolete log file version
0156:         long the log instant (LogCounter) of the last completed checkpoint
0157:         int JBMS version
0158:         int checkpoint interval
0159:         long spare (value set to 0)
0160:         long spare (value set to 0)
0161:         long spare (value set to 0)
0162:
0163:         @end_format
0164:         </PRE>	
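         For illustration only, restating the v15 layout above with the usual Java
         sizes (4 bytes per int, 8 bytes per long): the fields add up to
         4 + 4 + 8 + 4 + 4 + 8 + 8 + 8 = 48 bytes, and the log instant of the last
         completed checkpoint starts at byte offset 8.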
0165:         <HR WIDTH="100%">
0166:         <PRE>
0167:
0168:         Format of the log file
0169:
0170:         @format_id	FILE_STREAM_LOG_FILE
0171:         @purpose	The log file contains log record which record all the changes
0172:         to the database.  The complete transaction log is composed of a series of
0173:         log files.
0174:         @upgrade
0175:         @disk_layout
0176:         int format id - 	the format Id of this log file
0177:         int obsolete log file version - not used
0178:         long log file number - this number orders the log files in a
0179:         series to form the complete transaction log
0180:         long prevLogRecord - log instant of the previous log record, in the
0181:         previous log file. 
0182:
0183:         [log record wrapper]* one or more log records with wrapper
0184:
0185:         int endMarker - value of zero.  The beginning of a log record wrapper
0186:         is the length of the log record, therefore it is never zero
0187:         [int fuzzy end]* zero or more ints of value 0, present in case this log file
0188:         has been recovered and any incomplete log record has been zeroed out.
0189:         @end_format
0190:         </PRE>	
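         <P>
         A minimal sketch, for illustration only, of how the header fields above could
         be read back through the java.io.DataInput API; the helper name and return
         shape are made up for this example (the real reading is done elsewhere in
         this class):
         <PRE>
         static long[] readLogFileHeaderSketch(java.io.DataInput in)
                 throws java.io.IOException {
             int  formatId   = in.readInt();   // FILE_STREAM_LOG_FILE
             int  obsolete   = in.readInt();   // obsolete log file version, not used
             long fileNumber = in.readLong();  // orders this file within the series
             long prevRecord = in.readLong();  // instant of the last record in the previous file
             // the first log record wrapper starts here, at offset LOG_FILE_HEADER_SIZE (24)
             return new long[] { formatId, fileNumber, prevRecord };
         }
         </PRE>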
0191:         <HR WIDTH="100%">
0192:         <PRE>
0193:
0194:         Format of the log record wrapper
0195:
0196:         @format_id none.  The format is implied by the FILE_STREAM_LOG_FILE
0197:         @purpose	The log record wrapper provides information for the log scan.
0198:         @upgrade
0199:         @disk_layout
0200:         length(int) length of the log record (for forward scan)
0201:         instant(long) LogInstant of the log record
0202:         logRecord(byte[length]) byte array that is written by the FileLogger
0203:         length(int) length of the log record (for backward scan)
0204:         @end_format
0205:         </PRE>
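         <P>
         For example (illustration only): a logRecord byte array of length 100 is
         stored as 4 + 8 + 100 + 4 = 116 bytes on disk; the fixed 16-byte portion is
         the LOG_RECORD_OVERHEAD constant defined below.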
0206:
0207:
0208:         <P>Multithreading considerations:<BR>
0209:         Log Factory must be MT-safe.
0210:         <P>
0211:         Class is final as it has methods with privilege blocks
0212:         and implements PrivilegedExceptionAction.
0213:         */
0214:
0215:        public final class LogToFile implements  LogFactory, ModuleControl,
0216:                ModuleSupportable, Serviceable,
0217:                java.security.PrivilegedExceptionAction {
0218:
0219:            private static int fid = StoredFormatIds.FILE_STREAM_LOG_FILE;
0220:
0221:            // format Id must fit in 4 bytes
0222:
0223:            /**
0224:            	Return my format identifier.
0225:             */
0226:            public int getTypeFormatId() {
0227:                return StoredFormatIds.FILE_STREAM_LOG_FILE;
0228:            }
0229:
0230:            // at the beginning of every log file is the following information:
0231:            // the log file formatId
0232:            // the log file version (int)
0233:            // the log file number (long)
0234:            // the log instant at the end of the last log record in the previous file (long)
0235:            public static final int LOG_FILE_HEADER_SIZE = 24;
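            // = 4 (format id) + 4 (log file version) + 8 (log file number)
            //   + 8 (previous log instant) = 24 bytes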
0236:
0237:            protected static final int LOG_FILE_HEADER_PREVIOUS_LOG_INSTANT_OFFSET = LOG_FILE_HEADER_SIZE - 8;
0238:
0239:            // Number of bytes overhead of each log record.
0240:            // 4 bytes of length at the beginning, 8 bytes of log instant,
0241:            // and 4 bytes of length at the end
0242:            public static final int LOG_RECORD_OVERHEAD = 4 + 8 + 4;
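            // i.e. each log record costs a fixed 16 bytes on disk in addition to
            // the logRecord payload itself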
0243:
0244:            public static final String DBG_FLAG = SanityManager.DEBUG ? "LogTrace"
0245:                    : null;
0246:            public static final String DUMP_LOG_ONLY = SanityManager.DEBUG ? "DumpLogOnly"
0247:                    : null;
0248:            public static final String DUMP_LOG_FROM_LOG_FILE = SanityManager.DEBUG ? "derby.storage.logDumpStart"
0249:                    : null;
0250:            protected static final String LOG_SYNC_STATISTICS = "LogSyncStatistics";
0251:
0252:            // If you change this number, then JBMS 1.1x and 1.2x will give a really
0253:            // horrendous error message when booting against a db created by you.  When
0254:            // we decide that we don't need to worry about people misusing the
0255:            // product that way, then we can change this.  Just remember, before we do,
0256:            // all existing databases will have the number 9 in there.
0257:            private static final int OBSOLETE_LOG_VERSION_NUMBER = 9;
0258:
0259:            /* how big the log file should be before checkpoint or log switch is taken */
0260:            private static final int DEFAULT_LOG_SWITCH_INTERVAL = 1024 * 1024;
0261:            private static final int LOG_SWITCH_INTERVAL_MIN = 100000;
0262:            private static final int LOG_SWITCH_INTERVAL_MAX = 128 * 1024 * 1024;
0263:            private static final int CHECKPOINT_INTERVAL_MIN = 100000;
0264:            private static final int CHECKPOINT_INTERVAL_MAX = 128 * 1024 * 1024;
0265:            private static final int DEFAULT_CHECKPOINT_INTERVAL = 10 * 1024 * 1024;
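            // With the defaults above, a log file switch is requested after roughly
            // 1M bytes of log and a checkpoint after roughly 10M bytes; both are
            // configurable within the MIN/MAX bounds above.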
0266:
0267:            //log buffer size values
0268:            private static final int DEFAULT_LOG_BUFFER_SIZE = 32768; //32K
0269:            private static final int LOG_BUFFER_SIZE_MIN = 8192; //8k
0270:            private static final int LOG_BUFFER_SIZE_MAX = LOG_SWITCH_INTERVAL_MAX;
0271:            private int logBufferSize = DEFAULT_LOG_BUFFER_SIZE;
0272:
0273:            /* Log Control file flags. */
0274:            private static final byte IS_BETA_FLAG = 0x1;
0275:
0276:            /**
0277:             * When the derby.system.durability property is set to 'test', the store 
0278:             * system will not force sync calls in the following cases
0279:             * - for the log file at each commit
0280:             * - for the log file before data page is forced to disk
0281:             * - for page allocation when file is grown
0282:             * - for data writes during checkpoint
0283:             * This means it is possible that the recovery system may not work properly,
0284:             * committed transactions may be lost, and/or the database may not
0285:             * be in a consistent state.
0286:             * So that we can recognize that the database was at some point
0287:             * booted in this mode, this value is written out
0288:             * into the log control file. This helps prevent us from 
0289:             * wasting time resolving issues in such cases. 
0290:             * @see org.apache.derby.iapi.reference.Property#DURABILITY_PROPERTY
0291:             * This value is written as part of the log control file flags byte.
0292:             */
0293:            private static final byte IS_DURABILITY_TESTMODE_NO_SYNC_FLAG = 0x2;
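            // For example (illustrative only), a database booted with
            //     -Dderby.system.durability=test
            // runs with the syncs listed above disabled, and this flag is then
            // recorded in the log control file so later boots can tell that it
            // happened.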
0294:
0295:            /**
0296:             * keeps track of whether the database was previously booted at any time with 
0297:             * derby.system.durability=test
0298:             */
0299:            private static boolean wasDBInDurabilityTestModeNoSync = false;
0300:
0301:            /* to err on the conservative side, unless otherwise set, assume log
0302:             *	archive is ON 
0303:             */
0304:            private static final String DEFAULT_LOG_ARCHIVE_DIRECTORY = "DEFAULT";
0305:
0306:            private int logSwitchInterval = DEFAULT_LOG_SWITCH_INTERVAL;
0307:            private int checkpointInterval = DEFAULT_CHECKPOINT_INTERVAL;
0308:
0309:            String dataDirectory; // where files are stored
0310:            private WritableStorageFactory logStorageFactory;
0311:
0312:            private boolean logBeingFlushed; // is the log in the middle of a flush
0313:            // (access of the variable should sync on this)
0314:
0315:            protected LogAccessFile logOut; // an output stream to the log file
0316:            // (access of the variable should sync on this)
0317:            private StorageRandomAccessFile firstLog = null;
0318:            protected long endPosition = -1; // end position of the current log file
0319:            long lastFlush = 0; // the position in the current log
0320:            // file that has been flushed to disk
0321:
0322:            long logFileNumber = -1; // current log file number
0323:            // other than during boot and recovery time,
0324:            // logFileNumber is only changed by
0325:            // switchLogFile, which is synchronized.
0326:            // 
0327:            // MT - Anyone accessing this number should
0328:            // synchronize on this if the current log file
0329:            // must not be changed. If not synchronized,
0330:            // the log file may have been switched.
0331:
0332:            long firstLogFileNumber = -1;
0333:            // first log file that makes up the active
0334:            // portion (with active transactions) of the
0335:            // log.  
0336:            // 
0337:            // MT - This value is set during recovery or
0338:            // during log truncation.  In the former, single
0339:            // thread access is assumed.  In the latter, callers
0340:            // must synchronize on this to access
0341:            // or change it.
0342:
0343:            private long maxLogFileNumber = LogCounter.MAX_LOGFILE_NUMBER;
0344:            private CheckpointOperation currentCheckpoint;
0345:            // last checkpoint successfully taken
0346:            // 
0347:            // MT - only changed or accessed in recovery or
0348:            // checkpoint, both of which are single threaded
0349:
0350:            long checkpointInstant;
0351:            // log instant of the currentCheckpoint
0352:
0353:            private DaemonService checkpointDaemon; // the background worker thread who is going to
0354:            // do checkpoints for this log factory.
0355:
0356:            private int myClientNumber;
0357:            // use this number to talk to checkpoint Daemon
0358:
0359:            private volatile boolean checkpointDaemonCalled;
0360:            // checkpoint Daemon called already - it is not
0361:            // important that this value is correct, the
0362:            // daemon just needs to be called once in a
0363:            // while.  The daemon can handle multiple posts.
0364:
0365:            private long logWrittenFromLastCheckPoint = 0;
0366:            // keeps track of the amount of log written between checkpoints
0367:            private RawStoreFactory rawStoreFactory;
0368:            // use this only after recovery is finished
0369:
0370:            protected DataFactory dataFactory;
0371:            // use this only after recovery is finished
0372:
0373:            protected boolean ReadOnlyDB; // true if this db is read only, i.e., it cannot
0374:            // append log records
0375:
0376:            // DEBUG DEBUG - do not truncate log files
0377:            private boolean keepAllLogs;
0378:
0379:            // if the database is encrypted, the contents of the log files are encrypted
0380:            private boolean databaseEncrypted;
0381:
0382:            // the following booleans are used to put the log factory into various
0383:            // states
0384:            private boolean recoveryNeeded = true; // log needs to be recovered
0385:            private boolean inCheckpoint = false; // in the middle of a checkpoint
0386:            private boolean inRedo = false; // in the middle of redo loop
0387:            private boolean inLogSwitch = false;
0388:
0389:            // make sure we don't do anything after the log factory has been stopped
0390:            private boolean stopped = false;
0391:
0392:            // if log is to go to another device, this variable is set.  If null, then
0393:            // log goes to the log subdirectory underneath the data directory
0394:            String logDevice;
0395:
0396:            // disable syncing of log file when running in derby.system.durability=test
0397:            private boolean logNotSynced = false;
0398:
0399:            private volatile boolean logArchived = false;
0400:            private boolean logSwitchRequired = false;
0401:
0402:            /** DEBUG test only */
0403:            int test_logWritten = 0;
0404:            int test_numRecordToFillLog = -1;
0405:            private int mon_flushCalls;
0406:            private int mon_syncCalls;
0407:            private int mon_numLogFlushWaits;
0408:            private boolean mon_LogSyncStatistics;
0409:            private int mon_numBytesToLog;
0410:
0411:            /**
0412:            	If not null then something is corrupt in the raw store and this represents the original error.
0413:             */
0414:            protected volatile StandardException corrupt;
0415:
0416:            /**
0417:            	If frozen, don't allow anything on disk to change.
0418:             */
0419:            private boolean isFrozen;
0420:
0421:            /**
0422:              Product Version information. Invariant after boot.
0423:             */
0424:            ProductVersionHolder jbmsVersion;
0425:
0426:            /**
0427:            	On disk database version information. When running in soft upgrade this version
0428:            	may be different from jbmsVersion.
0429:             */
0430:            private int onDiskMajorVersion;
0431:            private int onDiskMinorVersion;
0432:            private boolean onDiskBeta;
0433:
0434:            private CRC32 checksum = new CRC32(); // holder for the checksum
0435:
0436:            /**
0437:             * Note: Why does the logging system support both file sync and write sync?
0438:             * The reason to support both file sync and write sync of logs is that
0439:             * there was no support for write sync until jdk1.4, and then
0440:             * there was a write sync jvm bug in jdk1.4.1; only from jdk1.4.2 on can the
0441:             * write sync (rws mode) mechanism be used correctly.
0442:             * The default in JVMs >= jdk1.4.2 is write sync (see the boot method for the jvm checks).
0443:             *
0444:             * Write sync support was added for performance reasons. 
0445:             * On commits, the logging system has to make sure the log for the committed
0446:             * transaction is on disk. Without write sync, the log is written to the 
0447:             * disk and then fsync() is used on commits to make sure the log has reached
0448:             * the disk. On most OSes, fsync() calls are expensive. 
0449:             * On heavily commit-oriented systems, file sync makes the system run slowly. 
0450:             * This problem is solved by using write sync on a preallocated log file; 
0451:             * write sync is much faster than doing a write followed by a file sync. 
0452:             * The file should be preallocated for write sync to perform better than 
0453:             * the file sync method. Whenever a new log file is created, 
0454:             * logSwitchInterval bytes are preallocated by writing zeros to the file after the header. 
0455:             */
0456:
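            // Illustrative sketch only (hypothetical helper, not used by this class):
            // the two durability mechanisms contrasted in the comment above.  "rws"
            // mode forces each write to the device before write() returns (write
            // sync); plain "rw" followed by FileDescriptor.sync() is the
            // write-then-fsync() path (file sync).
            private static void exampleSyncModes(File logFile) throws IOException {
                // write sync: the data is durable as soon as write() returns
                java.io.RandomAccessFile writeSynced =
                        new java.io.RandomAccessFile(logFile, "rws");
                writeSynced.write(new byte[16]);
                writeSynced.close();

                // file sync: buffered write, then an explicit fsync()-style flush
                java.io.RandomAccessFile fileSynced =
                        new java.io.RandomAccessFile(logFile, "rw");
                fileSynced.write(new byte[16]);
                fileSynced.getFD().sync();   // expensive on most OSes, as noted above
                fileSynced.close();
            }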
0457:            /* If set to true, write sync will be used to do log writes; otherwise file 
0458:             * level sync is used.
0459:             */
0460:            private boolean isWriteSynced = false;
0461:
0462:            // log file that is yet to be copied to backup; updates to this variable 
0463:            // need to be visible to the checkpoint thread. 
0464:            private volatile long logFileToBackup;
0465:            // It is set to true when online backup is in progress; updates to 
0466:            // this variable need to be visible to the checkpoint thread. 
0467:            private volatile boolean backupInProgress = false;
0468:
0469:            /**
0470:            	MT- not needed for constructor
0471:             */
0472:            public LogToFile() {
0473:                keepAllLogs = PropertyUtil
0474:                        .getSystemBoolean(RawStoreFactory.KEEP_TRANSACTION_LOG);
0475:
0476:                if (Performance.MEASURE)
0477:                    mon_LogSyncStatistics = PropertyUtil
0478:                            .getSystemBoolean(LOG_SYNC_STATISTICS);
0479:            }
0480:
0481:            /*
0482:             ** Methods of Corruptable
0483:             */
0484:
0485:            /**
0486:            	Once the log factory is marked as corrupt, the raw store is shut down.
0487:             */
0488:            public StandardException markCorrupt(StandardException originalError) {
0489:
0490:                boolean firsttime = false;
0491:
0492:                synchronized (this ) {
0493:                    if (corrupt == null && originalError != null) {
0494:                        corrupt = originalError;
0495:                        firsttime = true;
0496:                    }
0497:                }
0498:
0499:                // only print the first error
0500:                if (corrupt == originalError)
0501:                    logErrMsg(corrupt);
0502:
0503:                // this is the first time someone has detected an error; shut down the
0504:                // system as much as possible without further damaging it
0505:                if (firsttime) {
0506:                    synchronized (this ) {
0507:                        stopped = true;
0508:
0509:                        if (logOut != null) {
0510:                            try {
0511:                                logOut.corrupt(); // get rid of open file descriptor
0512:                            } catch (IOException ioe) {
0513:                                // don't worry about it, just trying to clean up 
0514:                            }
0515:                        }
0516:
0517:                        // NullPointerException is preferred over corrupting the database
0518:                        logOut = null;
0519:                    }
0520:
0521:                    if (dataFactory != null)
0522:                        dataFactory.markCorrupt(null);
0523:
0524:                }
0525:
0526:                return originalError;
0527:            }
0528:
0529:            private void checkCorrupt() throws StandardException {
0530:                synchronized (this ) {
0531:                    if (corrupt != null) {
0532:                        throw StandardException.newException(
0533:                                SQLState.LOG_STORE_CORRUPT, corrupt);
0534:                    }
0535:                }
0536:            }
0537:
0538:            /*
0539:             ** Methods of LogFactory
0540:             */
0541:
0542:            /**
0543:            	MT- not needed
0544:             */
0545:            public Logger getLogger() {
0546:
0547:                if (ReadOnlyDB)
0548:                    return null;
0549:                else
0550:                    return new FileLogger(this );
0551:            }
0552:
0553:            /**
0554:            	Recover the rawStore to a consistent state using the log.
0555:
0556:            	<P>
0557:            	In this implementation, the log is a stream of log records stored in
0558:            	one or more flat files.  Recovery is done in 2 passes: redo and undo.
0559:            	<BR> <B>Redo pass</B>
0560:            	<BR> In the redo pass, reconstruct the state of the rawstore by
0561:            	repeating exactly what happened before as recorded in the log.
0562:            	<BR><B>Undo pass</B>
0563:            	<BR> In the undo pass, all incomplete transactions are rolled back in
0564:            	the order from the most recently started to the oldest.
0565:
0566:            	<P>MT - synchronization provided by caller - RawStore boot.
0567:            	This method is guaranteed to be the only method being called and can
0568:            	assume single thread access on all fields.
0569:
0570:            	@see Loggable#needsRedo
0571:            	@see FileLogger#redo
0572:
0573:            	@exception StandardException Standard Cloudscape error policy
0574:             */
0575:            public void recover(RawStoreFactory rsf, DataFactory df,
0576:                    TransactionFactory tf) throws StandardException {
0577:                if (SanityManager.DEBUG) {
0578:                    SanityManager.ASSERT(rsf != null,
0579:                            "raw store factory == null");
0580:                    SanityManager.ASSERT(df != null, "data factory == null");
0581:                }
0582:
0583:                checkCorrupt();
0584:
0585:                rawStoreFactory = rsf;
0586:                dataFactory = df;
0587:
0588:                // initialize the log writer only after the rawstorefactory is available, 
0589:                // log writer requires encryption block size info from rawstore factory 
0590:                // to encrypt checksum log records. 
0591:                if (firstLog != null)
0592:                    logOut = new LogAccessFile(this , firstLog, logBufferSize);
0593:
0594:                // we don't want to set ReadOnlyDB before recovery has a chance to look
0595:                // at the latest checkpoint and determine that the database was shut down
0596:                // cleanly.  If the medium is read only but there are logs that need
0597:                // to be redone or in flight transactions, we are hosed.  The logs that
0598:                // are redone will leave dirty pages in the cache.
0599:
0600:                if (recoveryNeeded) {
0601:                    try {
0602:                        /////////////////////////////////////////////////////////////
0603:                        //
0604:                        // During boot time, the log control file is accessed and
0605:                        // logFileNumber is determined.  LogOut is not set up.
0606:                        // LogFileNumber is the log file the latest checkpoint lives in,
0607:                        // or 1.  It may not be the latest log file (the system may have
0608:                        // crashed between the time a new log was generated and the
0609:                        // checkpoint log record was written); that can only be determined at the
0610:                        // end of recovery redo.
0611:                        //
0612:                        /////////////////////////////////////////////////////////////
0613:
0614:                        FileLogger logger = (FileLogger) getLogger();
0615:
0616:                        /////////////////////////////////////////////////////////////
0617:                        //
0618:                        // try to find the most recent checkpoint 
0619:                        //
0620:                        /////////////////////////////////////////////////////////////
0621:                        if (checkpointInstant != LogCounter.INVALID_LOG_INSTANT) {
0622:                            currentCheckpoint = findCheckpoint(
0623:                                    checkpointInstant, logger);
0624:                        }
0625:
0626:                        // if we are only interested in dumping the log, start from the
0627:                        // beginning of the first log file
0628:                        if (SanityManager.DEBUG) {
0629:                            if (SanityManager.DEBUG_ON(DUMP_LOG_ONLY)) {
0630:                                currentCheckpoint = null;
0631:
0632:                                System.out.println("Dump log only");
0633:
0634:                                // unless otherwise specified, 1st log file starts at 1
0635:                                String beginLogFileNumber = PropertyUtil
0636:                                        .getSystemProperty(DUMP_LOG_FROM_LOG_FILE);
0637:
0638:                                if (beginLogFileNumber != null) {
0639:                                    logFileNumber = Long.valueOf(
0640:                                            beginLogFileNumber).longValue();
0641:                                } else {
0642:                                    logFileNumber = 1;
0643:                                }
0644:                            }
0645:                        }
0646:
0647:                        if (SanityManager.DEBUG) {
0648:                            if (SanityManager.DEBUG_ON("setCheckpoint")) {
0649:                                currentCheckpoint = null;
0650:
0651:                                System.out.println("Set Checkpoint.");
0652:
0653:                                // unless otherwise specified, 1st log file starts at 1
0654:                                String checkpointStartLogStr = PropertyUtil
0655:                                        .getSystemProperty("derby.storage.checkpointStartLog");
0656:
0657:                                String checkpointStartOffsetStr = PropertyUtil
0658:                                        .getSystemProperty("derby.storage.checkpointStartOffset");
0659:
0660:                                if ((checkpointStartLogStr != null)
0661:                                        && (checkpointStartOffsetStr != null)) {
0662:                                    checkpointInstant = LogCounter
0663:                                            .makeLogInstantAsLong(Long.valueOf(
0664:                                                    checkpointStartLogStr)
0665:                                                    .longValue(), Long.valueOf(
0666:                                                    checkpointStartOffsetStr)
0667:                                                    .longValue());
0668:                                } else {
0669:                                    SanityManager
0670:                                            .THROWASSERT("must set derby.storage.checkpointStartLog and derby.storage.checkpointStartOffset, if setting setCheckpoint.");
0671:                                }
0672:
0673:                                currentCheckpoint = findCheckpoint(
0674:                                        checkpointInstant, logger);
0675:                            }
0676:                        }
0677:
0678:                        long redoLWM = LogCounter.INVALID_LOG_INSTANT;
0679:                        long undoLWM = LogCounter.INVALID_LOG_INSTANT;
0680:                        long ttabInstant = LogCounter.INVALID_LOG_INSTANT;
0681:
0682:                        StreamLogScan redoScan = null;
0683:                        if (currentCheckpoint != null) {
0684:                            Formatable transactionTable = null;
0685:
0686:                            // RESOLVE: sku 
0687:                            // currentCheckpoint.getTransactionTable();
0688:
0689:                            // need to set the transaction table before the undo
0690:                            tf.useTransactionTable(transactionTable);
0691:
0692:                            redoLWM = currentCheckpoint.redoLWM();
0693:                            undoLWM = currentCheckpoint.undoLWM();
0694:
0695:                            if (transactionTable != null)
0696:                                ttabInstant = checkpointInstant;
0697:
0698:                            if (SanityManager.DEBUG) {
0699:                                if (SanityManager.DEBUG_ON(DBG_FLAG)) {
0700:                                    SanityManager
0701:                                            .DEBUG(
0702:                                                    DBG_FLAG,
0703:                                                    "Found checkpoint at "
0704:                                                            + LogCounter
0705:                                                                    .toDebugString(checkpointInstant)
0706:                                                            + " "
0707:                                                            + currentCheckpoint
0708:                                                                    .toString());
0709:                                }
0710:                            }
0711:
0712:                            firstLogFileNumber = LogCounter
0713:                                    .getLogFileNumber(redoLWM);
0714:
0715:                            // figure out where the first interesting log file is.
0716:                            if (LogCounter.getLogFileNumber(undoLWM) < firstLogFileNumber) {
0717:                                firstLogFileNumber = LogCounter
0718:                                        .getLogFileNumber(undoLWM);
0719:                            }
0720:
0721:                            // if the checkpoint record doesn't have a transaction
0722:                            // table, we need to rebuild it by scanning the log from
0723:                            // the undoLWM.  If it does have a transaction table, we
0724:                            // only need to scan the log from the redoLWM
0725:
0726:                            redoScan = (StreamLogScan) openForwardsScan(
0727:                                    undoLWM, (LogInstant) null);
0728:
0729:                        } else {
0730:                            // no checkpoint
0731:                            tf.useTransactionTable((Formatable) null);
0732:
0733:                            long start = LogCounter.makeLogInstantAsLong(
0734:                                    logFileNumber, LOG_FILE_HEADER_SIZE);
0735:
0736:                            // no checkpoint, start redo from the beginning of the 
0737:                            // file - assume this is the first log file
0738:                            firstLogFileNumber = logFileNumber;
0739:
0740:                            redoScan = (StreamLogScan) openForwardsScan(start,
0741:                                    (LogInstant) null);
0742:                        }
0743:
0744:                        // open a transaction that is used for redo and rollback
0745:                        RawTransaction recoveryTransaction = tf
0746:                                .startTransaction(rsf, ContextService
0747:                                        .getFactory()
0748:                                        .getCurrentContextManager(),
0749:                                        AccessFactoryGlobals.USER_TRANS_NAME);
0750:
0751:                        // make this transaction aware that it is a recovery transaction
0752:                        // and don't spew forth post commit work while replaying the log
0753:                        recoveryTransaction.recoveryTransaction();
0754:
0755:                        /////////////////////////////////////////////////////////////
0756:                        //
0757:                        //  Redo loop - in FileLogger
0758:                        //
0759:                        /////////////////////////////////////////////////////////////
0760:
0761:                        // 
0762:                        // set log factory state to inRedo so that if redo caused any
0763:                        // dirty page to be written from the cache, it won't flush the
0764:                        // log since the end of the log has not been determined and we
0765:                        // know the log record that caused the page to change has
0766:                        // already been written to the log.  We need the page write to
0767:                        // go thru the log factory because if the redo has a problem,
0768:                        // the log factory is corrupt and the only way we know not to
0769:                        // write out the page in a checkpoint is if it checks with the
0770:                        // log factory, and that is done via a flush - we use the WAL
0771:                        // protocol to stop corrupt pages from writing to the disk.
0772:                        //
0773:                        inRedo = true;
0774:
0775:                        long logEnd = logger.redo(recoveryTransaction, tf,
0776:                                redoScan, redoLWM, ttabInstant);
0777:
0778:                        inRedo = false;
0779:
0780:                        // if we are only interested in dumping the log, don't alter
0781:                        // the database and prevent anyone from using the log
0782:                        if (SanityManager.DEBUG) {
0783:                            if (SanityManager.DEBUG_ON(LogToFile.DUMP_LOG_ONLY)) {
0784:                                Monitor
0785:                                        .logMessage("_____________________________________________________");
0786:                                Monitor.logMessage("\n\t\t Log dump finished");
0787:                                Monitor
0788:                                        .logMessage("_____________________________________________________");
0789:                                // just in case, it has not been set anyway
0790:                                logOut = null;
0791:
0792:                                return;
0793:                            }
0794:                        }
0795:
0796:                        /////////////////////////////////////////////////////////////
0797:                        //
0798:                        // determine where the log ends
0799:                        //
0800:                        /////////////////////////////////////////////////////////////
0801:                        StorageRandomAccessFile theLog = null;
0802:
0803:                        // if logend == LogCounter.INVALID_LOG_SCAN, that means there 
0804:                        // is no log record in the log - most likely it is corrupted in
0805:                        // some way ...
0806:                        if (logEnd == LogCounter.INVALID_LOG_INSTANT) {
0807:                            Monitor.logTextMessage(MessageId.LOG_LOG_NOT_FOUND);
0808:
0809:                            StorageFile logFile = getLogFileName(logFileNumber);
0810:
0811:                            if (privExists(logFile)) {
0812:                                // if we can delete this strange corrupted file, do so,
0813:                                // otherwise, skip it
0814:                                if (!privDelete(logFile)) {
0815:                                    logFile = getLogFileName(++logFileNumber);
0816:                                }
0817:                            }
0818:
0819:                            try {
0820:                                theLog = privRandomAccessFile(logFile, "rw");
0821:                            } catch (IOException ioe) {
0822:                                theLog = null;
0823:                            }
0824:
0825:                            if (theLog == null || !privCanWrite(logFile)) {
0826:                                if (theLog != null)
0827:                                    theLog.close();
0828:
0829:                                theLog = null;
0830:
0831:                                ReadOnlyDB = true;
0832:                            } else {
0833:                                try {
0834:                                    // no previous log file or previous log position
0835:                                    if (!initLogFile(theLog, logFileNumber,
0836:                                            LogCounter.INVALID_LOG_INSTANT)) {
0837:                                        throw markCorrupt(StandardException
0838:                                                .newException(
0839:                                                        SQLState.LOG_SEGMENT_NOT_EXIST,
0840:                                                        logFile.getPath()));
0841:                                    }
0842:                                } catch (IOException ioe) {
0843:                                    throw markCorrupt(StandardException
0844:                                            .newException(
0845:                                                    SQLState.LOG_IO_ERROR, ioe));
0846:                                }
0847:
0848:                                // successfully init'd the log file - set up markers,
0849:                                // and position at the end of the log.
0850:                                endPosition = theLog.getFilePointer();
0851:                                lastFlush = endPosition;
0852:
0853:                                //if write sync is true, preallocate the log file
0854:                                //and reopen the file in rws mode.
0855:                                if (isWriteSynced) {
0856:                                    //extend the file by writing zeros to it
0857:                                    preAllocateNewLogFile(theLog);
0858:                                    theLog.close();
0859:                                    theLog = openLogFileInWriteMode(logFile);
0860:                                    //position the log at the current end position
0861:                                    theLog.seek(endPosition);
0862:                                }
0863:
0864:                                if (SanityManager.DEBUG) {
0865:                                    SanityManager
0866:                                            .ASSERT(
0867:                                                    endPosition == LOG_FILE_HEADER_SIZE,
0868:                                                    "empty log file has wrong size");
0869:                                }
0870:
0871:                                //because we are already incrementing the log number
0872:                                //here, no special log switch is required for
0873:                                //backup recoveries.
0874:                                logSwitchRequired = false;
0875:                            }
0876:                        } else {
0877:                            // logEnd is the instant of the next log record in the log
0878:                            // it is used to determine the last known good position of
0879:                            // the log
0880:                            logFileNumber = LogCounter.getLogFileNumber(logEnd);
0881:
0882:                            ReadOnlyDB = df.isReadOnly();
0883:
0884:                            StorageFile logFile = getLogFileName(logFileNumber);
0885:
0886:                            if (!ReadOnlyDB) {
0887:                                // if datafactory doesn't think it is readonly, we can
0888:                                // do some further tests of our own
0889:                                try {
0890:                                    if (isWriteSynced)
0891:                                        theLog = openLogFileInWriteMode(logFile);
0892:                                    else
0893:                                        theLog = privRandomAccessFile(logFile,
0894:                                                "rw");
0895:                                } catch (IOException ioe) {
0896:                                    theLog = null;
0897:                                }
0898:                                if (theLog == null || !privCanWrite(logFile)) {
0899:                                    if (theLog != null)
0900:                                        theLog.close();
0901:                                    theLog = null;
0902:
0903:                                    ReadOnlyDB = true;
0904:                                }
0905:                            }
0906:
0907:                            if (!ReadOnlyDB) {
0908:                                endPosition = LogCounter
0909:                                        .getLogFilePosition(logEnd);
0910:
0911:                                //
0912:                                // The end of the log is at endPosition.  Which is where
0913:                                // the next log should be appending.
0914:                                //
0915:                                // if the last log record ends before the end of the
0916:                                // log file, then this log file has a fuzzy end.
0917:                                // Zap all the bytes between endPosition and EOF to 0.
0918:                                //
0919:                                // the end log marker is 4 bytes (of zeros)
0920:                                //
0921:                                // if endPosition + 4 == logOut.length, we have a
0922:                                // properly terminated log file
0923:                                //
0924:                                // if endPosition + 4 is > logOut.length, there are 0,
0925:                                // 1, 2, or 3 bytes of 'fuzz' at the end of the log. We
0926:                                // can ignore that because it is guaranteed to be
0927:                                // overwritten by the next log record.
0928:                                //
0929:                                // if endPosition + 4 is < logOut.length, we have a
0930:                                // partial log record at the end of the log.
0931:                                //
0932:                                // We need to overwrite all of the incomplete log
0933:                                // record, because if we start logging but cannot
0934:                                // 'consume' all the bad log, the log will truly be
0935:                                // corrupted: if the next 4 bytes (the length of the
0936:                                // log record) happen to be small enough, the next
0937:                                // time the database is recovered it will be
0938:                                // interpreted that the whole log record is in the log,
0939:                                // and recovery will try to objectify it, only to get a
0940:                                // classNotFound error or worse.
0941:                                //
0942:
0943:                                //find out if log had incomplete log records at the end.
0944:                                if (redoScan.isLogEndFuzzy()) {
0945:                                    theLog.seek(endPosition);
0946:                                    long eof = theLog.length();
0947:
0948:                                    Monitor
0949:                                            .logTextMessage(
0950:                                                    MessageId.LOG_INCOMPLETE_LOG_RECORD,
0951:                                                    logFile, new Long(
0952:                                                            endPosition),
0953:                                                    new Long(eof));
0954:
0955:                                    /* Write zeros from incomplete log record to end of file */
0956:                                    long nWrites = (eof - endPosition)
0957:                                            / logBufferSize;
0958:                                    int rBytes = (int) ((eof - endPosition) % logBufferSize);
0959:                                    byte zeroBuf[] = new byte[logBufferSize];
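                                    // for example, with the default 32K buffer and
                                    // 100,000 bytes of fuzz this is 3 full buffers
                                    // plus 1,696 remaining bytes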
0960:
0961:                                    //write the zeros to file
0962:                                    while (nWrites-- > 0)
0963:                                        theLog.write(zeroBuf);
0964:                                    if (rBytes != 0)
0965:                                        theLog.write(zeroBuf, 0, rBytes);
0966:
0967:                                    if (!isWriteSynced)
0968:                                        syncFile(theLog);
0969:                                }
0970:
0971:                                if (SanityManager.DEBUG) {
0972:                                    if (theLog.length() != endPosition) {
0973:                                        SanityManager
0974:                                                .ASSERT(
0975:                                                        theLog.length() > endPosition,
0976:                                                        "log end > log file length, bad scan");
0977:                                    }
0978:                                }
0979:
0980:                                // set the log to the true end position,
0981:                                // and not the end of the file
0982:
0983:                                lastFlush = endPosition;
0984:                                theLog.seek(endPosition);
0985:                            }
0986:                        }
0987:
0988:                        if (theLog != null)
0989:                            logOut = new LogAccessFile(this , theLog,
0990:                                    logBufferSize);
0991:
0992:                        if (logSwitchRequired)
0993:                            switchLogFile();
0994:
0995:                        boolean noInFlightTransactions = tf
0996:                                .noActiveUpdateTransaction();
0997:
0998:                        if (ReadOnlyDB) {
0999:                            // in the unlikely event that someone detects we are
1000:                            // dealing with a read only db, check to make sure the
1001:                            // database was quiescent when it was copied, with no unflushed
1002:                            // dirty buffers
1003:                            if (!noInFlightTransactions) {
1004:                                throw StandardException
1005:                                        .newException(SQLState.LOG_READ_ONLY_DB_NEEDS_UNDO);
1006:                            }
1007:                        }
1008:
1009:                        /////////////////////////////////////////////////////////////
1010:                        //
1011:                        // Undo loop - in transaction factory.  It just gets one
1012:                        // transaction at a time from the transaction table and calls
1013:                        // undo, no different from runtime.
1014:                        //
1015:                        /////////////////////////////////////////////////////////////
1016:
1017:                        if (SanityManager.DEBUG) {
1018:                            if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
1019:                                SanityManager.DEBUG(LogToFile.DBG_FLAG,
1020:                                        "About to call undo(), transaction table ="
1021:                                                + tf.getTransactionTable());
1022:                        }
1023:
1024:                        if (!noInFlightTransactions) {
1025:                            if (SanityManager.DEBUG) {
1026:                                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
1027:                                    SanityManager
1028:                                            .DEBUG(LogToFile.DBG_FLAG,
1029:                                                    "In recovery undo, rollback inflight transactions");
1030:                            }
1031:
1032:                            tf
1033:                                    .rollbackAllTransactions(
1034:                                            recoveryTransaction, rsf);
1035:
1036:                            if (SanityManager.DEBUG) {
1037:                                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
1038:                                    SanityManager.DEBUG(LogToFile.DBG_FLAG,
1039:                                            "finish recovery undo,");
1040:                            }
1041:                        } else {
1042:                            if (SanityManager.DEBUG) {
1043:                                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
1044:                                    SanityManager
1045:                                            .DEBUG(LogToFile.DBG_FLAG,
1046:                                                    "No in flight transaction, no recovery undo work");
1047:                            }
1048:                        }
1049:
1050:                        /////////////////////////////////////////////////////////////
1051:                        //
1052:                        // XA prepared xact loop - in transaction factory.  At this
1053:                        // point only prepared transactions should be left in the
1054:                        // transaction table, all others should have been aborted or
1055:                        // committed and removed from the transaction table.  It just
1056:                        // gets one transaction at a time from the transaction table,
1057:                        // creates a real context and transaction, reclaims locks,
1058:                        // and leaves the new xact in the transaction table.
1059:                        //
1060:                        /////////////////////////////////////////////////////////////
1061:
1062:                        if (SanityManager.DEBUG) {
1063:                            if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
1064:                                SanityManager.DEBUG(LogToFile.DBG_FLAG,
1065:                                        "About to call rePrepare(), transaction table ="
1066:                                                + tf.getTransactionTable());
1067:                        }
1068:
1069:                        tf.handlePreparedXacts(rsf);
1070:
1071:                        if (SanityManager.DEBUG) {
1072:                            if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
1073:                                SanityManager.DEBUG(LogToFile.DBG_FLAG,
1074:                                        "Finished rePrepare(), transaction table ="
1075:                                                + tf.getTransactionTable());
1076:                        }
1077:
1078:                        /////////////////////////////////////////////////////////////
1079:                        //
1080:                        // End of recovery.
1081:                        //
1082:                        /////////////////////////////////////////////////////////////
1083:
1084:                        // recovery is finished.  Close the transaction
1085:                        recoveryTransaction.close();
1086:
1087:                        // notify the dataFactory that recovery is completed,
1088:                        // but before the checkpoint is written.
1089:                        dataFactory.postRecovery();
1090:
1091:                        //////////////////////////////////////////////////////////////
1092:                        // set the transaction factory short id; we have seen all the
1093:                        // transactions in the log, and at the minimum, the checkpoint
1094:                        // transaction will be there.  Set the shortId to the next
1095:                        // value.
1096:                        //////////////////////////////////////////////////////////////
1097:                        tf.resetTranId();
1098:
1099:                        // do a checkpoint (will flush the log) if there was any rollback;
1100:                        // if we can't checkpoint for some reason, flush the log and carry on
1101:                        if (!ReadOnlyDB) {
1102:                            boolean needCheckpoint = true;
1103:
1104:                            // if we can figure out that there is very little in the
1105:                            // log (less than 1000 bytes) and we haven't done any
1106:                            // rollbacks, then don't checkpoint. Otherwise checkpoint.
1107:                            if (currentCheckpoint != null
1108:                                    && noInFlightTransactions
1109:                                    && redoLWM != LogCounter.INVALID_LOG_INSTANT
1110:                                    && undoLWM != LogCounter.INVALID_LOG_INSTANT) {
1111:                                if ((logFileNumber == LogCounter
1112:                                        .getLogFileNumber(redoLWM))
1113:                                        && (logFileNumber == LogCounter
1114:                                                .getLogFileNumber(undoLWM))
1115:                                        && (endPosition < (LogCounter
1116:                                                .getLogFilePosition(redoLWM) + 1000)))
1117:                                    needCheckpoint = false;
1118:                            }
1119:
1120:                            if (needCheckpoint
1121:                                    && !checkpoint(rsf, df, tf, false))
1122:                                flush(logFileNumber, endPosition);
1123:                        }
1124:
1125:                        logger.close();
1126:
1127:                        recoveryNeeded = false;
1128:                    } catch (IOException ioe) {
1129:                        if (SanityManager.DEBUG)
1130:                            ioe.printStackTrace();
1131:
1132:                        throw markCorrupt(StandardException.newException(
1133:                                SQLState.LOG_IO_ERROR, ioe));
1134:                    } catch (ClassNotFoundException cnfe) {
1135:                        throw markCorrupt(StandardException.newException(
1136:                                SQLState.LOG_CORRUPTED, cnfe));
1137:                    } catch (StandardException se) {
1138:                        throw markCorrupt(se);
1139:                    } catch (Throwable th) {
1140:                        if (SanityManager.DEBUG) {
1141:                            SanityManager.showTrace(th);
1142:                            th.printStackTrace();
1143:                        }
1144:
1145:                        throw markCorrupt(StandardException.newException(
1146:                                SQLState.LOG_RECOVERY_FAILED, th));
1147:                    }
1148:                } else {
1149:
1150:                    tf.useTransactionTable((Formatable) null);
1151:
1152:                    // set the transaction factory short id
1153:                    tf.resetTranId();
1154:                }
1155:
1156:                // done with recovery        
1157:
1158:                /////////////////////////////////////////////////////////////
1159:                // setup checkpoint daemon
1160:                /////////////////////////////////////////////////////////////
1161:                checkpointDaemon = rawStoreFactory.getDaemon();
1162:                if (checkpointDaemon != null) {
1163:                    myClientNumber = checkpointDaemon
1164:                            .subscribe(this , true /*onDemandOnly */);
1165:                }
1166:            }
1167:
1168:            /**
1169:            	Checkpoint the rawStore.
1170:
1171:            	<P> MT- Only one checkpoint may be taking place at any given time.
1172:
1173:            	<P> The steps of a checkpoint are
1174:
1175:            	<OL>
1176:            	<LI> switch to a new log file if possible
1177:            	<PRE>
1178:            		freeze the log (for the transition to a new log file)
1179:            			flush current log file
1180:            			create and flush the new log file (with file number 1 higher
1181:                        than the previous log file). The new log file becomes the
1182:                        current log file.
1183:            		unfreeze the log
1184:            	</PRE>
1185:            	<LI> start checkpoint transaction
1186:            	<LI> gather interesting information about the rawStore:
1187:            				the current log instant (redoLWM)
1188:            				the earliest active transaction begin tran log record 
1189:                            instant (undoLWM), all the truncation LWM set by clients 
1190:                            of raw store (replication)
1191:            	<LI> clean the buffer cache 
1192:            	<LI> log the next checkpoint log record, which contains 
1193:            			(repPoint, undoLWM, redoLWM) and commit checkpoint transaction.
1194:            	<LI> synchronously write the control file containing the next checkpoint
1195:            			log record log instant
1196:            	<LI> the new checkpoint becomes the current checkpoint.
1197:            			Somewhere near the beginning of each log file should be a
1198:            			checkpoint log record (not guaranteed to be there)
1199:            	<LI> see if the log can be truncated
1200:
1201:            	<P>
1202:            	The earliest useful log record is determined by the repPoint and the 
1203:                undoLWM, whichever is earlier. 
1204:            	<P>
1205:            	Every log file whose log file number is smaller than the earliest 
1206:                useful log record's log file number can be deleted.
1207:
1208:            	<P><PRE>
1209:            		Transactions can be in the following states w.r.t. a checkpoint -
1210:            		consider the log as a continuous stream and not as a series of log 
1211:                    files for the sake of clarity.  
1212:            		|(BT)-------(ET)| marks the begin and end of a transaction.
1213:            		.                          checkpoint started
1214:            		.       |__undoLWM          |
1215:            		.       V                   |___redoLWM
1216:            		.                           |___TruncationLWM
1217:            		.                           |
1218:            		.                           V
1219:            		1 |-----------------|
1220:            		2       |--------------------------------|
1221:            		3           |-------|
1222:            		4               |--------------------------------------(end of log)
1223:            		5                                       |-^-|
1224:            		.                                   Checkpoint Log Record
1225:            		---A--->|<-------B--------->|<-------------C-----------
1226:            	</PRE>
1227:
1228:            	<P>
1229:            	There are only 3 periods of interest : <BR>
1230:            		A) before undoLWM,  B) between undo and redo LWM, C) after redoLWM.
1231:
1232:            	<P>
1233:            	Transaction 1 started in A and terminates in B.<BR>
1234:            		During redo, we should only see log records and endXact from this
1235:            		transaction in the first phase (between undoLWM and redoLWM).  No
1236:            		beginXact log record for this transaction will be seen.
1237:
1238:            	<P>
1239:            	Transaction 2 started in B (right on the undoLWM) and terminated in C.<BR>
1240:            		Any transaction that terminates in C must have a beginXact at or 
1241:                    after undoLWM.  In other words, no transaction can span A, B and C.
1242:            		During redo, we will see beginXact, other log records and endXact 
1243:                    for this transaction.
1244:
1245:            	<P>
1246:            	Transaction 3 started in B and ended in B.<BR>
1247:            		During redo, we will see beginXact, other log records and endXact 
1248:                    for this transaction.
1249:
1250:            	<P>
1251:            	Transaction 4 begins in B and never ends.<BR>
1252:            		During redo, we will see beginXact, other log records.
1253:            		In undo, this loser transaction will be rolled back.
1254:
1255:            	<P>
1256:            	Transaction 5 is the transaction taking the checkpoint.<BR>
1257:            	    The checkpoint action started way back in time but the checkpoint
1258:            		log record is only written after the buffer cache has been flushed.
1259:
1260:            	<P>
1261:            	Note that if any time elapses between taking the undoLWM and the
1262:            		redoLWM, it will create a 4th period of interest.
1263:
1264:            	@exception StandardException - encountered an exception while doing 
1265:                                               the checkpoint.
1266:             */
1267:            public boolean checkpoint(RawStoreFactory rsf, DataFactory df,
1268:                    TransactionFactory tf, boolean wait)
1269:                    throws StandardException {
1270:
1271:                // call checkpoint with no pre-started transaction
1272:                boolean done = checkpointWithTran(null, rsf, df, tf);
1273:
1274:                //the above checkpoint call will return 'false' without
1275:                //performing the checkpoint if some other thread is doing a checkpoint. 
1276:                //In cases like backup it is necessary to wait for the 
1277:                //checkpoint to complete before copying the files. The 'wait' flag gets 
1278:                //passed in as 'true' in such cases.
1279:                //When the wait flag is true, we will wait here until the other thread
1280:                //which is actually doing the checkpoint completes.
1281:
1282:                if (!done && wait) {
1283:                    synchronized (this ) {
1284:                        //wait until the thread that is doing the checkpoint completes it.
1285:                        while (inCheckpoint) {
1286:                            try {
1287:                                wait();
1288:                            } catch (InterruptedException ie) {
1289:                                throw StandardException.interrupt(ie);
1290:                            }
1291:                        }
1292:                        done = true;
1293:                    }
1294:                }
1295:
1296:                return done;
1297:            }
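            /*
               Editorial note - a minimal sketch of how a caller might use the
               'wait' flag of checkpoint() above.  The variable name logFactory
               and the idea that a backup thread is the caller are assumptions
               for illustration only; the signature and the wait semantics are
               the ones shown in this file.

               <PRE>
                   // routine checkpoint, e.g. from the checkpoint daemon:
                   // returns false (doing nothing) if another thread is
                   // already taking a checkpoint
                   logFactory.checkpoint(rawStoreFactory, dataFactory,
                           transactionFactory, false);

                   // backup path: force a checkpoint and, if another thread
                   // is already checkpointing, block until that checkpoint
                   // has completed before copying any data files
                   logFactory.checkpoint(rawStoreFactory, dataFactory,
                           transactionFactory, true);
               </PRE>
             */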
1298:
1299:            /**
1300:            	checkpoint with a pre-started transaction
1301:
1302:            	@exception StandardException Cloudscape Standard Error Policy 
1303:             */
1304:            protected boolean checkpointWithTran(RawTransaction cptran,
1305:                    RawStoreFactory rsf, DataFactory df, TransactionFactory tf)
1306:                    throws StandardException {
1307:                boolean proceed = true;
1308:                LogInstant redoLWM;
1309:
1310:                // we may be called to stop the database after a bad error, make sure
1311:                // logOut is set
1312:                if (logOut == null) {
1313:                    return false;
1314:                }
1315:
1316:                long approxLogLength;
1317:
1318:                synchronized (this ) {
1319:                    // has someone else found a problem in the raw store?  
1320:                    if (corrupt != null) {
1321:                        throw StandardException.newException(
1322:                                SQLState.LOG_STORE_CORRUPT, corrupt);
1323:                    }
1324:
1325:                    // if another checkpoint is in progress, don't do anything
1326:                    if (inCheckpoint == true)
1327:                        proceed = false;
1328:                    else
1329:                        inCheckpoint = true;
1330:
1331:                    approxLogLength = endPosition; // current end position
1332:
1333:                    // don't return from inside of a sync block
1334:                }
1335:
1336:                if (!proceed) {
1337:                    return false;
1338:                }
1339:
1340:                // needCPTran == true if we are not supplied with a pre-started transaction
1341:                boolean needCPTran = (cptran == null);
1342:
1343:                if (SanityManager.DEBUG) {
1344:                    if (logSwitchInterval == 0) {
1345:                        SanityManager
1346:                                .THROWASSERT("switching log file: Approx log length = "
1347:                                        + approxLogLength
1348:                                        + " logSwitchInterval = 0");
1349:                    }
1350:                }
1351:
1352:                try {
1353:                    if (approxLogLength > logSwitchInterval) {
1354:                        switchLogFile();
1355:                        //log switch is occurring in conjunction with the 
1356:                        //checkpoint, set the amount of log written from last checkpoint to zero.
1357:                        logWrittenFromLastCheckPoint = 0;
1358:                    } else {
1359:                        //checkpoint is happening without the log switch,
1360:                        //in the middle of a log file. The amount of log already written for
1361:                        //the current log file should not be included in the calculation 
1362:                        //of when the next checkpoint is due, so assign the negative
1363:                        //value of the amount of log written for this file. Later it will
1364:                        //be subtracted when we switch the log file or while calculating whether 
1365:                        //we are due for a checkpoint at flush time.
1366:                        logWrittenFromLastCheckPoint = -endPosition;
1367:                    }
1368:
1369:                    if (SanityManager.DEBUG) {
1370:                        // if this debug flag is set on, just switch log file
1371:
1372:                        if (SanityManager.DEBUG_ON(TEST_LOG_SWITCH_LOG))
1373:                            return false;
1374:                    }
1375:
1376:                    // start a checkpoint transaction 
1377:                    if (needCPTran)
1378:                        cptran = tf.startInternalTransaction(rsf,
1379:                                ContextService.getFactory()
1380:                                        .getCurrentContextManager());
1381:
1382:                    /////////////////////////////////////////////////////
1383:                    // gather a snapshot of the various interesting points of the log
1384:                    /////////////////////////////////////////////////////
1385:                    long undoLWM_long;
1386:                    long redoLWM_long;
1387:
1388:                    synchronized (this ) // we could synchronize on something else, it
1389:                    // doesn't matter as long as logAndDo syncs on
1390:                    // the same thing
1391:                    {
1392:                        // The redo LWM is the current log instant.  We are going to 
1393:                        // clean the cache shortly, any log record before this point 
1394:                        // will not ever need to be redone.
1395:                        redoLWM_long = currentInstant();
1396:                        redoLWM = new LogCounter(redoLWM_long);
1397:
1398:                        // The undo LWM is what we need to rollback all transactions.
1399:                        // Synchronize this with the starting of a new transaction so 
1400:                        // that the transaction factory can have a consistent view
1401:                        // See FileLogger.logAndDo
1402:
1403:                        LogCounter undoLWM = (LogCounter) (tf
1404:                                .firstUpdateInstant());
1405:                        if (undoLWM == null)
1406:                            undoLWM_long = redoLWM_long; // no active transaction 
1407:                        else
1408:                            undoLWM_long = undoLWM.getValueAsLong();
1409:
1410:                    }
1411:
1412:                    /////////////////////////////////////////////////////
1413:                    // clean the buffer cache
1414:                    /////////////////////////////////////////////////////
1415:                    df.checkpoint();
1416:
1417:                    /////////////////////////////////////////////////////
1418:                    // write out the checkpoint log record
1419:                    /////////////////////////////////////////////////////
1420:
1421:                    // send the checkpoint record to the log
1422:                    Formatable transactionTable = tf.getTransactionTable();
1423:
1424:                    CheckpointOperation nextCheckpoint = new CheckpointOperation(
1425:                            redoLWM_long, undoLWM_long, transactionTable);
1426:
1427:                    cptran.logAndDo(nextCheckpoint);
1428:
1429:                    LogCounter checkpointInstant = (LogCounter) (cptran
1430:                            .getLastLogInstant());
1431:
1432:                    if (checkpointInstant != null) {
1433:                        // since checkpoint is an internal transaction, I need to 
1434:                        // flush it to make sure it actually goes to the log
1435:                        flush(checkpointInstant);
1436:                    } else {
1437:                        throw StandardException
1438:                                .newException(SQLState.LOG_CANNOT_LOG_CHECKPOINT);
1439:                    }
1440:
1441:                    cptran.commit();
1442:
1443:                    if (needCPTran) {
1444:                        cptran.close(); // if we started it, we will close it
1445:                        cptran = null;
1446:                    }
1447:
1448:                    /////////////////////////////////////////////////////
1449:                    // write out the log control file which contains the last
1450:                    // successful checkpoint log record
1451:                    /////////////////////////////////////////////////////
1452:
1453:                    if (!writeControlFile(getControlFileName(),
1454:                            checkpointInstant.getValueAsLong())) {
1455:                        throw StandardException
1456:                                .newException(SQLState.LOG_CONTROL_FILE,
1457:                                        getControlFileName());
1458:                    }
1459:
1460:                    // next checkpoint becomes the current checkpoint
1461:                    currentCheckpoint = nextCheckpoint;
1462:
1463:                    ////////////////////////////////////////////////////
1464:                    // see if we can reclaim some log space
1465:                    ////////////////////////////////////////////////////
1466:
1467:                    if (!logArchived()) {
1468:                        truncateLog(currentCheckpoint);
1469:                    }
1470:
1471:                    // delete the committed container drop stubs 
1472:                    // that are no longer required during recovery. 
1473:                    // If a backup is in progress don't delete the stubs until 
1474:                    // it is done. Backup needs to copy all the stubs that 
1475:                    // are needed to recover from the backup checkpoint on restore.
1476:                    if (!backupInProgress)
1477:                        df.removeDroppedContainerFileStubs(redoLWM);
1478:
1479:                } catch (IOException ioe) {
1480:                    throw markCorrupt(StandardException.newException(
1481:                            SQLState.LOG_IO_ERROR, ioe));
1482:                } finally {
1483:                    synchronized (this ) {
1484:
1485:                        inCheckpoint = false;
1486:                        notifyAll();
1487:                    }
1488:
1489:                    if (cptran != null && needCPTran) {
1490:                        try {
1491:                            cptran.commit();
1492:                            cptran.close();
1493:                        } catch (StandardException se) {
1494:                            throw markCorrupt(StandardException.newException(
1495:                                    SQLState.LOG_CORRUPTED, se));
1496:                        }
1497:                    }
1498:                }
1499:
1500:                return true;
1501:            }
1502:
1503:            /**
1504:            	Flush all unwritten log records up to the indicated log instant to disk
1505:            	and sync.
1506:            	Also check to see if the database is frozen or corrupt.
1507:
1508:            	<P>MT - not needed, wrapper method
1509:
1510:            	@param where flush log up to here
1511:
1512:            	@exception StandardException Standard Cloudscape error policy
1513:             */
1514:            public void flush(LogInstant where) throws StandardException {
1515:                long fileNumber;
1516:                long wherePosition;
1517:
1518:                if (where == null) {
1519:                    // don't flush, just use this to check if database is frozen or
1520:                    // corrupt 
1521:                    fileNumber = 0;
1522:                    wherePosition = LogCounter.INVALID_LOG_INSTANT;
1523:                } else {
1524:                    LogCounter whereC = (LogCounter) where;
1525:                    fileNumber = whereC.getLogFileNumber();
1526:                    wherePosition = whereC.getLogFilePosition();
1527:                }
1528:                flush(fileNumber, wherePosition);
1529:            }
1530:
1531:            /**
1532:            	Flush all unwritten log records to disk and sync.
1533:            	Also check to see if the database is frozen or corrupt.
1534:
1535:            	<P>MT - not needed, wrapper method
1536:
1537:            	@exception StandardException Standard Cloudscape error policy
1538:             */
1539:            public void flushAll() throws StandardException {
1540:                long fnum;
1541:                long whereTo;
1542:
1543:                synchronized (this ) {
1544:                    fnum = logFileNumber;
1545:                    whereTo = endPosition;
1546:                }
1547:
1548:                flush(fnum, whereTo);
1549:            }
1550:
1551:            /*
1552:             * Private methods that help to implement methods of LogFactory
1553:             */
1554:
1555:            /**
1556:            	Verify that the log file is of the right format and of the right
1557:            	version and log file number.
1558:
1559:            	<P>MT - not needed, no global variables used
1560:
1561:            	@param logFileName the name of the log file
1562:            	@param number the log file number
1563:            	@return true if the log file is of the current version and of the
1564:            	correct format
1565:
1566:            	@exception StandardException Standard Cloudscape error policy
1567:             */
1568:            private boolean verifyLogFormat(StorageFile logFileName, long number)
1569:                    throws StandardException {
1570:                boolean ret = false;
1571:                try {
1572:                    StorageRandomAccessFile log = privRandomAccessFile(
1573:                            logFileName, "r");
1574:                    ret = verifyLogFormat(log, number);
1575:                    log.close();
1576:                } catch (IOException ioe) {
1577:
1578:                }
1579:
1580:                return ret;
1581:            }
1582:
1583:            /**
1584:            	Verify that the log file is of the right format and of the right
1585:            	version and log file number.  The log file position is set to the
1586:            	beginning.
1587:
1588:            	<P>MT - MT-unsafe, caller must synchronize
1589:
1590:            	@param log the log file
1591:            	@param number the log file number
1592:            	@return true if the log file is of the current version and of the
1593:            	correct format
1594:
1595:            	@exception StandardException Standard Cloudscape error policy
1596:             */
1597:            private boolean verifyLogFormat(StorageRandomAccessFile log,
1598:                    long number) throws StandardException {
1599:                try {
1600:                    log.seek(0);
1601:                    int logfid = log.readInt();
1602:                    int obsoleteLogVersion = log.readInt(); // this value is useless, for
1603:                    // backwards compatibility
1604:                    long logNumber = log.readLong();
1605:
1606:                    if (logfid != fid || logNumber != number) {
1607:                        throw StandardException
1608:                                .newException(SQLState.LOG_INCOMPATIBLE_FORMAT,
1609:                                        dataDirectory);
1610:                    }
1611:                } catch (IOException ioe) {
1612:                    throw StandardException.newException(
1613:                            SQLState.LOG_CANNOT_VERIFY_LOG_FORMAT, ioe,
1614:                            dataDirectory);
1615:                }
1616:
1617:                return true;
1618:            }
1619:
1620:            /**
1621:            	Initialize the log to the correct format with the given version and
1622:            	log file number.  The new log file must be empty.  After initializing,
1623:            	the file is synchronously written to disk.
1624:
1625:            	<P>MT - synchronization provided by caller
1626:
1627:            	@param newlog the new log file to be initialized
1628:            	@param number the log file number
1629:            	@param prevLogRecordEndInstant the end position of the  previous log record
1630:
1631:            	@return true if the log file is empty, else false.
1632:
1633:            	@exception IOException if new log file cannot be accessed or initialized
1634:             */
1635:
1636:            private boolean initLogFile(StorageRandomAccessFile newlog,
1637:                    long number, long prevLogRecordEndInstant)
1638:                    throws IOException, StandardException {
1639:                if (newlog.length() != 0)
1640:                    return false;
1641:
1642:                if (SanityManager.DEBUG) {
1643:                    if (SanityManager.DEBUG_ON(TEST_LOG_FULL))
1644:                        testLogFull();
1645:                }
1646:                if (SanityManager.DEBUG) {
1647:                    if (SanityManager.DEBUG_ON(TEST_SWITCH_LOG_FAIL1))
1648:                        throw new IOException("TestLogSwitchFail1");
1649:                }
1650:
1651:                newlog.seek(0);
1652:
1653:                newlog.writeInt(fid);
1654:                newlog.writeInt(OBSOLETE_LOG_VERSION_NUMBER); // for silly backwards compatibility reason
1655:                newlog.writeLong(number);
1656:                newlog.writeLong(prevLogRecordEndInstant);
1657:
1658:                syncFile(newlog);
1659:
1660:                return true;
1661:            }
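            /*
               Editorial note - the header that initLogFile writes above (and
               that verifyLogFormat reads back) lays out as follows; the
               offsets are derived from the writeInt/writeLong calls in this
               method and are listed for reference only:

               <PRE>
                   offset  0   int   fid (log file format id)
                   offset  4   int   OBSOLETE_LOG_VERSION_NUMBER (kept only
                                     for backwards compatibility)
                   offset  8   long  log file number
                   offset 16   long  end instant of the previous log record
               </PRE>

               That is 24 bytes in total, which is why a freshly initialized
               log file is expected to have its first log record start at
               LOG_FILE_HEADER_SIZE (see the assertions in switchLogFile).
             */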
1662:
1663:            /**
1664:            	Switch to the next log file if possible.
1665:
1666:            	<P>MT - log factory is single threaded thru a log file switch, the log
1667:            	is frozen for the duration of the switch
1668:             */
1669:            private void switchLogFile() throws StandardException {
1670:                boolean switchedOver = false;
1671:
1672:                /////////////////////////////////////////////////////
1673:                // Freeze the log for the switch over to a new log file.
1674:                // This blocks out any other threads from sending log
1675:                // record to the log stream.
1676:                // 
1677:                // The switching of the log file and checkpoint are really
1678:                // independent events, they are tied together just because a
1679:                // checkpoint is the natural place to switch the log and vice
1680:                // versa.  This could happen before the cache is flushed or
1681:                // after the checkpoint log record is written.
1682:                /////////////////////////////////////////////////////
1683:                synchronized (this ) {
1684:
1685:                    // Make sure that this thread of control is guaranteed to complete
1686:                    // its work of switching the log file without having to give up
1687:                    // the semaphore to a backup or another flusher.  Do this by looping
1688:                    // until we have the semaphore, the log is not being flushed, and
1689:                    // the log is not frozen for backup.  Track (2985). 
1690:                    while (logBeingFlushed | isFrozen) {
1691:                        try {
1692:                            wait();
1693:                        } catch (InterruptedException ie) {
1694:                            throw StandardException.interrupt(ie);
1695:                        }
1696:                    }
1697:
1698:                    // we have an empty log file here, refuse to switch.
1699:                    if (endPosition == LOG_FILE_HEADER_SIZE) {
1700:                        if (SanityManager.DEBUG) {
1701:                            Monitor
1702:                                    .logMessage("not switching from an empty log file ("
1703:                                            + logFileNumber + ")");
1704:                        }
1705:                        return;
1706:                    }
1707:
1708:                    // log file isn't being flushed right now and logOut is not being
1709:                    // used.
1710:                    StorageFile newLogFile = getLogFileName(logFileNumber + 1);
1711:
1712:                    if (logFileNumber + 1 >= maxLogFileNumber) {
1713:                        throw StandardException.newException(
1714:                                SQLState.LOG_EXCEED_MAX_LOG_FILE_NUMBER,
1715:                                new Long(maxLogFileNumber));
1716:                    }
1717:
1718:                    StorageRandomAccessFile newLog = null; // the new log file
1719:                    try {
1720:                        // if the log file exists and cannot be deleted, we cannot
1721:                        // switch the log right now
1722:                        if (privExists(newLogFile) && !privDelete(newLogFile)) {
1723:                            logErrMsg(MessageService.getTextMessage(
1724:                                    MessageId.LOG_NEW_LOGFILE_EXIST, newLogFile
1725:                                            .getPath()));
1726:                            return;
1727:                        }
1728:
1729:                        try {
1730:                            newLog = privRandomAccessFile(newLogFile, "rw");
1731:                        } catch (IOException ioe) {
1732:                            newLog = null;
1733:                        }
1734:
1735:                        if (newLog == null || !privCanWrite(newLogFile)) {
1736:                            if (newLog != null)
1737:                                newLog.close();
1738:                            newLog = null;
1739:
1740:                            return;
1741:                        }
1742:
1743:                        if (initLogFile(newLog, logFileNumber + 1, LogCounter
1744:                                .makeLogInstantAsLong(logFileNumber,
1745:                                        endPosition))) {
1746:
1747:                            // New log file init ok, close the old one and
1748:                            // switch over, after this point, need to shutdown the
1749:                            // database if any error crops up
1750:                            switchedOver = true;
1751:
1752:                            // write out an extra 0 at the end to mark the end of the log
1753:                            // file.
1754:
1755:                            logOut.writeEndMarker(0);
1756:
1757:                            endPosition += 4;
1758:                            //note that we are in a log switch so the flusher 
1759:                            //does not request another log switch 
1760:                            inLogSwitch = true;
1761:                            // flush everything including the int we just wrote
1762:                            flush(logFileNumber, endPosition);
1763:
1764:                            // simulate out of log error after the switch over
1765:                            if (SanityManager.DEBUG) {
1766:                                if (SanityManager
1767:                                        .DEBUG_ON(TEST_SWITCH_LOG_FAIL2))
1768:                                    throw new IOException("TestLogSwitchFail2");
1769:                            }
1770:
1771:                            logOut.close(); // close the old log file
1772:
1773:                            logWrittenFromLastCheckPoint += endPosition;
1774:
1775:                            endPosition = newLog.getFilePointer();
1776:                            lastFlush = endPosition;
1777:
1778:                            if (isWriteSynced) {
1779:                                //extend the file by writing zeros to it
1780:                                preAllocateNewLogFile(newLog);
1781:                                newLog.close();
1782:                                newLog = openLogFileInWriteMode(newLogFile);
1783:                                newLog.seek(endPosition);
1784:                            }
1785:
1786:                            logOut = new LogAccessFile(this , newLog,
1787:                                    logBufferSize);
1788:                            newLog = null;
1789:
1790:                            if (SanityManager.DEBUG) {
1791:                                if (endPosition != LOG_FILE_HEADER_SIZE)
1792:                                    SanityManager
1793:                                            .THROWASSERT("new log file has unexpected size "
1794:                                                    + endPosition);
1795:                            }
1796:                            logFileNumber++;
1797:
1798:                            if (SanityManager.DEBUG) {
1799:                                SanityManager.ASSERT(
1800:                                        endPosition == LOG_FILE_HEADER_SIZE,
1801:                                        "empty log file has wrong size");
1802:                            }
1803:
1804:                        } else // something went wrong, delete the half-baked file
1805:                        {
1806:                            newLog.close();
1807:                            newLog = null;
1808:
1809:                            if (privExists(newLogFile))
1810:                                privDelete(newLogFile);
1811:
1812:                            logErrMsg(MessageService.getTextMessage(
1813:                                    MessageId.LOG_CANNOT_CREATE_NEW, newLogFile
1814:                                            .getPath()));
1815:                            newLogFile = null; // cleared only after its path is logged
1816:                        }
1817:
1818:                    } catch (IOException ioe) {
1819:
1820:                        inLogSwitch = false;
1821:                        // switching the log file is an optional operation and there is no direct user
1822:                        // control.  Just send a warning message to whatever
1823:                        // system administrator there may be
1824:
1825:                        logErrMsg(MessageService.getTextMessage(
1826:                                MessageId.LOG_CANNOT_CREATE_NEW_DUETO,
1827:                                newLogFile.getPath(), ioe.toString()));
1828:
1829:                        try {
1830:                            if (newLog != null) {
1831:                                newLog.close();
1832:                                newLog = null;
1833:                            }
1834:                        } catch (IOException ioe2) {
1835:                        }
1836:
1837:                        if (newLogFile != null && privExists(newLogFile)) {
1838:                            privDelete(newLogFile);
1839:                            newLogFile = null;
1840:                        }
1841:
1842:                        if (switchedOver) // error occurred after the old log file has been closed!
1843:                        {
1844:                            logOut = null; // limit any damage
1845:                            throw markCorrupt(StandardException.newException(
1846:                                    SQLState.LOG_IO_ERROR, ioe));
1847:                        }
1848:                    }
1849:
1850:                    inLogSwitch = false;
1851:                }
1852:                // unfreezes the log
1853:            }
1854:
1855:            /**
1856:            	Flush all unwritten log records up to the indicated log instant to disk
1857:            	without syncing.
1858:
1859:            	<P>MT - not needed, wrapper method
1860:
1861:            	@param wherePosition flush log up to here
1862:
1863:            	@exception IOException Failed to flush to the log
1864:             */
1865:            private void flushBuffer(long fileNumber, long wherePosition)
1866:                    throws IOException, StandardException {
1867:                synchronized (this ) {
1868:                    if (fileNumber < logFileNumber) // history
1869:                        return;
1870:
1871:                    // A log instant indicates the start of a log record
1872:                    // but not how long it is. Thus the amount of data in
1873:                    // the logOut buffer is irrelevant. We can only skip
1874:                    // flushing the buffer if the last synced flush already
1875:                    // included this required log instant. This is because
1876:                    // we never flush & sync partial log records.
1877:
1878:                    if (wherePosition < lastFlush) // already flushed
1879:                        return;
1880:
1881:                    // We don't update lastFlush here because lastFlush
1882:                    // is the last position in the log file that has been
1883:                    // flushed *and* synced to disk. Here we only flush.
1884:                    // ie. lastFlush should be renamed lastSync.
1885:                    //
1886:                    // We could have another variable indicating to which
1887:                    // point the log has been flushed which this routine
1888:                    // could take advantage of. This would only help rollbacks though.
1889:
1890:                    logOut.flushLogAccessFile();
1891:                }
1892:            }
1893:
1894:            /** Get rid of old and unnecessary log files
1895:
1896:            	<P> MT- only one log truncation is allowed to take place at any
1897:            	given time.  Synchronized on this.
1898:
1899:             */
1900:            private void truncateLog(CheckpointOperation checkpoint) {
1901:                long oldFirstLog;
1902:                long firstLogNeeded;
1903:
1904:                if (keepAllLogs)
1905:                    return;
1906:                if ((firstLogNeeded = getFirstLogNeeded(checkpoint)) == -1)
1907:                    return;
1908:
1909:                // when backup is in progress, log files that are yet to
1910:                // be copied to the backup should not be deleted, even 
1911:                // if they are not required for crash recovery.
1912:                if (backupInProgress) {
1913:                    long logFileNeededForBackup = logFileToBackup;
1914:                    // check if the log file number that is yet to be copied 
1915:                    // to the backup is less than the log file required 
1916:                    // for crash recovery; if it is, then the first 
1917:                    // log file that should not be deleted is the one 
1918:                    // that is yet to be copied to the backup.  
1919:                    if (logFileNeededForBackup < firstLogNeeded)
1920:                        firstLogNeeded = logFileNeededForBackup;
1921:                }
1922:
1923:                oldFirstLog = firstLogFileNumber;
1924:                firstLogFileNumber = firstLogNeeded;
1925:
1926:                while (oldFirstLog < firstLogNeeded) {
1927:                    StorageFile uselessLogFile = null;
1928:                    try {
1929:                        uselessLogFile = getLogFileName(oldFirstLog);
1930:                        if (privDelete(uselessLogFile)) {
1931:                            if (SanityManager.DEBUG) {
1932:                                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
1933:                                    SanityManager.DEBUG(DBG_FLAG,
1934:                                            "truncating useless log file "
1935:                                                    + uselessLogFile.getPath());
1936:                            }
1937:                        } else {
1938:                            if (SanityManager.DEBUG) {
1939:                                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
1940:                                    SanityManager.DEBUG(DBG_FLAG,
1941:                                            "Fail to truncate useless log file "
1942:                                                    + uselessLogFile.getPath());
1943:                            }
1944:                        }
1945:                    } catch (StandardException se) {
1946:                        if (SanityManager.DEBUG)
1947:                            SanityManager.THROWASSERT(
1948:                                    "error opening log segment while deleting "
1949:                                            + uselessLogFile.getPath(), se);
1950:
1951:                        // if insane, just leave it be
1952:                    }
1953:
1954:                    oldFirstLog++;
1955:                }
1956:            }
1957:
1958:            /**
1959:             * Return the "oldest" log file still needed by recovery. 
1960:             * <p>
1961:             * Returns the log file that contains the undoLWM, ie. the oldest
1962:             * log record of all uncommitted transactions in the given checkpoint.
1963:             * 
1964:             * If no checkpoint is given then returns -1, indicating all log records
1965:             * may be necessary.
1966:             *
1967:             **/
1968:            private long getFirstLogNeeded(CheckpointOperation checkpoint) {
1969:                long firstLogNeeded;
1970:
1971:                // one truncation at a time
1972:                synchronized (this ) {
1973:                    firstLogNeeded = (checkpoint != null ? LogCounter
1974:                            .getLogFileNumber(checkpoint.undoLWM()) : -1);
1975:
1976:                    if (SanityManager.DEBUG) {
1977:                        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
1978:                            SanityManager.DEBUG(DBG_FLAG,
1979:                                    "truncatLog: undoLWM firstlog needed "
1980:                                            + firstLogNeeded);
1981:                    }
1982:
1983:                    if (SanityManager.DEBUG) {
1984:                        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
1985:                            SanityManager.DEBUG(DBG_FLAG,
1986:                                    "truncatLog: checkpoint truncationLWM firstlog needed "
1987:                                            + firstLogNeeded);
1988:                            SanityManager.DEBUG(DBG_FLAG,
1989:                                    "truncatLog: firstLogFileNumber = "
1990:                                            + firstLogFileNumber);
1991:                        }
1992:                    }
1993:                }
1994:                return firstLogNeeded;
1995:            }
1996:
1997:            /**
1998:            	Carefully write out this value to the control file.
1999:                We do a safe write of this data by writing it 
2000:                into two files every time we write the control data.
2001:                We write a checksum at the end of the file, so if by
2002:                chance the system crashes while writing into the file,
2003:                the checksum tells us that the control file
2004:                is hosed and we use the mirror file, which will have
2005:                the control data written at the last checkpoint.
2006:
2007:            	see comment at beginning of file for log control file format.
2008:
2009:            	<P> MT- synchronized by caller
2010:             */
2011:            boolean writeControlFile(StorageFile logControlFileName, long value)
2012:                    throws IOException, StandardException {
2013:                StorageRandomAccessFile logControlFile = null;
2014:
2015:                ByteArrayOutputStream baos = new ByteArrayOutputStream(64);
2016:                DataOutputStream daos = new DataOutputStream(baos);
2017:
2018:                daos.writeInt(fid);
2019:
2020:                // so that when this db is booted by 1.1x and 1.2x JBMS, an IOException
2021:                // stack trace rather than some error message that tells
2022:                // the user to delete the database will show up.
2023:                daos.writeInt(OBSOLETE_LOG_VERSION_NUMBER);
2024:                daos.writeLong(value);
2025:
2026:                if (onDiskMajorVersion == 0) {
2027:                    onDiskMajorVersion = jbmsVersion.getMajorVersion();
2028:                    onDiskMinorVersion = jbmsVersion.getMinorVersion();
2029:                    onDiskBeta = jbmsVersion.isBeta();
2030:                }
2031:
2032:                // previous to 1.3, that's all we wrote.  
2033:                // from 1.3 and onward, also write out the JBMSVersion 
2034:                daos.writeInt(onDiskMajorVersion);
2035:                daos.writeInt(onDiskMinorVersion);
2036:
2037:                // For 2.0 beta we added the build number and the isBeta indication.
2038:                // (5 bytes from our first spare long)
2039:                daos.writeInt(jbmsVersion.getBuildNumberAsInt());
2040:
2041:                byte flags = 0;
2042:                if (onDiskBeta)
2043:                    flags |= IS_BETA_FLAG;
2044:
2045:                // When database is booted with derby.system.durability=test,
2046:                // this mode does not guarantee that 
2047:                // - database will recover 
2048:                // - committed transactions will not be lost
2049:                // - database will be in a consistent state
2050:                // Hence necessary to keep track of this state so we don't 
2051:                // waste time resolving issues in such cases.
2052:                // wasDBInDurabilityTestModeNoSync has information if database was
2053:                // previously booted at any time in this mode
2054:                if (logNotSynced || wasDBInDurabilityTestModeNoSync)
2055:                    flags |= IS_DURABILITY_TESTMODE_NO_SYNC_FLAG;
2056:                daos.writeByte(flags);
2057:
2058:                //
2059:                // write some spare bytes; after 2.0 we have 3 + 2(8) spare bytes.
2060:                long spare = 0;
2061:
2062:                daos.writeByte(0);
2063:                daos.writeByte(0);
2064:                daos.writeByte(0);
2065:                daos.writeLong(spare);
2066:                daos.flush();
2067:                // write the checksum for the control data written
2068:                checksum.reset();
2069:                checksum.update(baos.toByteArray(), 0, baos.size());
2070:                daos.writeLong(checksum.getValue());
2071:                daos.flush();
2072:
2073:                try {
2074:                    checkCorrupt();
2075:
2076:                    try {
2077:                        logControlFile = privRandomAccessFile(
2078:                                logControlFileName, "rw");
2079:                    } catch (IOException ioe) {
2080:                        logControlFile = null;
2081:                        return false;
2082:                    }
2083:
2084:                    if (!privCanWrite(logControlFileName))
2085:                        return false;
2086:
2087:                    if (SanityManager.DEBUG) {
2088:                        if (SanityManager.DEBUG_ON(TEST_LOG_FULL))
2089:                            testLogFull();
2090:                    }
2091:
2092:                    logControlFile.seek(0);
2093:                    logControlFile.write(baos.toByteArray());
2094:                    syncFile(logControlFile);
2095:                    logControlFile.close();
2096:
2097:                    // write the same data to mirror control file
2098:                    try {
2099:                        logControlFile = privRandomAccessFile(
2100:                                getMirrorControlFileName(), "rw");
2101:                    } catch (IOException ioe) {
2102:                        logControlFile = null;
2103:                        return false;
2104:                    }
2105:
2106:                    logControlFile.seek(0);
2107:                    logControlFile.write(baos.toByteArray());
2108:                    syncFile(logControlFile);
2109:
2110:                } finally {
2111:                    if (logControlFile != null)
2112:                        logControlFile.close();
2113:                }
2114:
2115:                return true;
2116:
2117:            }
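            /*
               Editorial note - the byte layout produced by writeControlFile
               above and consumed by readControlFile below, as derived from the
               stream writes in this method (a reference sketch, not a format
               specification):

               <PRE>
                   int   fid
                   int   OBSOLETE_LOG_VERSION_NUMBER
                   long  checkpoint log record instant ("value")
                   int   on-disk major version
                   int   on-disk minor version
                   int   build number
                   byte  flags (IS_BETA_FLAG, IS_DURABILITY_TESTMODE_NO_SYNC_FLAG)
                   3 spare bytes and one spare long
                   long  checksum over all of the preceding bytes
               </PRE>

               The identical bytes are written to the control file and to the
               mirror control file, so a write torn by a crash can be detected
               through the checksum and the surviving copy used instead.
             */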
2118:
2119:            /*
2120:            	Carefully read the content of the control file.
2121:
2122:            	<P> MT- read only
2123:             */
2124:            private long readControlFile(StorageFile logControlFileName,
2125:                    Properties startParams) throws IOException,
2126:                    StandardException {
2127:                StorageRandomAccessFile logControlFile = null;
2128:                ByteArrayInputStream bais = null;
2129:                DataInputStream dais = null;
2130:                logControlFile = privRandomAccessFile(logControlFileName, "r");
2131:                boolean upgradeNeeded = false;
2132:                long value = LogCounter.INVALID_LOG_INSTANT;
2133:                long onDiskChecksum = 0;
2134:                long controlFilelength = logControlFile.length();
2135:                byte barray[] = null;
2136:
2137:                try {
2138:                    // The length of the file is less than the minimum for any version.
2139:                    // It is possibly hosed; no point in reading data from this file.
2140:                    // Skip reading the checksum; the control file is from before 1.5.
2141:                    if (controlFilelength < 16)
2142:                        onDiskChecksum = -1;
2143:                    else if (controlFilelength == 16) {
2144:                        barray = new byte[16];
2145:                        logControlFile.readFully(barray);
2146:                    } else if (controlFilelength > 16) {
2147:                        barray = new byte[(int) logControlFile.length() - 8];
2148:                        logControlFile.readFully(barray);
2149:                        onDiskChecksum = logControlFile.readLong();
2150:                        if (onDiskChecksum != 0) {
2151:                            checksum.reset();
2152:                            checksum.update(barray, 0, barray.length);
2153:                        }
2154:                    }
2155:
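                    // Only trust the control data if the stored checksum matches the one
                    // computed over the bytes just read, or if there is no stored checksum
                    // at all (older control file formats written before checksums existed).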
2156:                    if (onDiskChecksum == checksum.getValue()
2157:                            || onDiskChecksum == 0) {
2158:
2159:                        bais = new ByteArrayInputStream(barray);
2160:                        dais = new DataInputStream(bais);
2161:
2162:                        if (dais.readInt() != fid) {
2163:                            throw StandardException.newException(
2164:                                    SQLState.LOG_INCOMPATIBLE_FORMAT,
2165:                                    dataDirectory);
2166:                        }
2167:
2168:                        int obsoleteVersion = dais.readInt();
2169:                        value = dais.readLong();
2170:
2171:                        if (SanityManager.DEBUG) {
2172:                            if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
2173:                                SanityManager.DEBUG(LogToFile.DBG_FLAG,
2174:                                        "log control file ckp instance = "
2175:                                                + LogCounter
2176:                                                        .toDebugString(value));
2177:                        }
2178:
2179:                        // from version 1.5 onward, we added an int for storing JBMS
2180:                        // version and an int for storing checkpoint interval
2181:                        // and log switch interval
2182:                        onDiskMajorVersion = dais.readInt();
2183:                        onDiskMinorVersion = dais.readInt();
2184:                        int dbBuildNumber = dais.readInt();
2185:                        int flags = dais.readByte();
2186:
2187:                        // check if the database was booted previously at any time with
2188:                        // derby.system.durability=test mode
2189:                        // If yes, then on a boot error we report that this setting is
2190:                        // probably the cause for the error and also log a warning
2191:                        // in the derby.log that this mode was set previously
2192:                        wasDBInDurabilityTestModeNoSync = (flags & IS_DURABILITY_TESTMODE_NO_SYNC_FLAG) != 0;
2193:
2194:                        if (SanityManager.DEBUG) {
2195:                            if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
2196:                                SanityManager
2197:                                        .DEBUG(
2198:                                                LogToFile.DBG_FLAG,
2199:                                                "log control file, was derby.system.durability set to test = "
2200:                                                        + wasDBInDurabilityTestModeNoSync);
2201:                        }
2202:
2203:                        onDiskBeta = (flags & IS_BETA_FLAG) != 0;
2204:                        if (onDiskBeta) {
2205:                            // if it is beta, it can only be booted by exactly the same
2206:                            // version
2207:                            if (!jbmsVersion.isBeta()
2208:                                    || onDiskMajorVersion != jbmsVersion
2209:                                            .getMajorVersion()
2210:                                    || onDiskMinorVersion != jbmsVersion
2211:                                            .getMinorVersion()) {
2212:                                boolean forceBetaUpgrade = false;
2213:                                if (SanityManager.DEBUG) {
2214:                                    // give ourselves an out for this beta check for debugging purposes
2215:                                    if (SanityManager
2216:                                            .DEBUG_ON("forceBetaUpgrade")) {
2217:                                        Monitor
2218:                                                .logMessage("WARNING !! : forcing beta upgrade.");
2219:                                        forceBetaUpgrade = true;
2220:                                    }
2221:                                }
2222:
2223:                                if (!forceBetaUpgrade) {
2224:                                    throw StandardException.newException(
2225:                                            SQLState.LOG_CANNOT_UPGRADE_BETA,
2226:                                            dataDirectory, ProductVersionHolder
2227:                                                    .simpleVersionString(
2228:                                                            onDiskMajorVersion,
2229:                                                            onDiskMinorVersion,
2230:                                                            onDiskBeta));
2231:                                }
2232:                            }
2233:                        }
2234:
2235:                        // JBMS_VERSION must be numbered so that it is ever
2236:                        // increasing.  We are backwards compatible but not
2237:                        // forwards compatible
2238:                        //
2239:                        if (onDiskMajorVersion > jbmsVersion.getMajorVersion()
2240:                                || (onDiskMajorVersion == jbmsVersion
2241:                                        .getMajorVersion() && onDiskMinorVersion > jbmsVersion
2242:                                        .getMinorVersion())) {
2243:                            // don't need to worry about point release, no format
2244:                            // upgrade is allowed. 
2245:                            throw StandardException.newException(
2246:                                    SQLState.LOG_INCOMPATIBLE_VERSION,
2247:                                    dataDirectory, ProductVersionHolder
2248:                                            .simpleVersionString(
2249:                                                    onDiskMajorVersion,
2250:                                                    onDiskMinorVersion,
2251:                                                    onDiskBeta));
2252:                        }
2253:
2254:                        // Ensure that upgrade has been requested for a major or minor upgrade;
2255:                        // maintenance (point) versions should not require an upgrade.
2256:                        if ((onDiskMajorVersion != jbmsVersion
2257:                                .getMajorVersion())
2258:                                || (onDiskMinorVersion != jbmsVersion
2259:                                        .getMinorVersion())) {
2260:                            upgradeNeeded = true;
2261:                        }
2262:                        // if the checksum is zero in a version > 3.5 file, the file is hosed,
2263:                        // except in case of upgrade from versions <= 3.5
2264:                        if (onDiskChecksum == 0
2265:                                && (!(onDiskMajorVersion <= 3 && onDiskMinorVersion <= 5) || onDiskMajorVersion == 0))
2266:                            value = LogCounter.INVALID_LOG_INSTANT;
2267:                    }
2268:                } finally {
2269:                    if (logControlFile != null)
2270:                        logControlFile.close();
2271:                    if (bais != null)
2272:                        bais.close();
2273:                    if (dais != null)
2274:                        dais.close();
2275:                }
2276:
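                // A version mismatch alone is not enough to rewrite the control file;
                // it is only rewritten when this boot explicitly requested a full upgrade
                // (Monitor.isFullUpgrade). Otherwise the old on-disk version is kept and
                // the database continues to run in soft upgrade mode.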
2277:                if (upgradeNeeded) {
2278:                    if (Monitor.isFullUpgrade(startParams, ProductVersionHolder
2279:                            .simpleVersionString(onDiskMajorVersion,
2280:                                    onDiskMinorVersion, onDiskBeta))) {
2281:
2282:                        onDiskMajorVersion = jbmsVersion.getMajorVersion();
2283:                        onDiskMinorVersion = jbmsVersion.getMinorVersion();
2284:                        onDiskBeta = jbmsVersion.isBeta();
2285:
2286:                        // Write out the new log control file with the new
2287:                        // version; the database has been upgraded
2288:
2289:                        if (!writeControlFile(logControlFileName, value)) {
2290:                            throw StandardException.newException(
2291:                                    SQLState.LOG_CONTROL_FILE,
2292:                                    logControlFileName);
2293:                        }
2294:                    }
2295:                }
2296:
2297:                return value;
2298:
2299:            }
2300:
2301:            /**
2302:             * Create the directory where the transaction log should go.
2303:             * @exception StandardException Standard Error Policy
2304:             */
2305:            private void createLogDirectory() throws StandardException {
2306:                StorageFile logDir = logStorageFactory
2307:                        .newStorageFile(LogFactory.LOG_DIRECTORY_NAME);
2308:
2309:                if (privExists(logDir)) {
2310:                    // make sure log directory is empty.
2311:                    String[] logfiles = privList(logDir);
2312:                    if (logfiles != null) {
2313:                        if (logfiles.length != 0) {
2314:                            throw StandardException.newException(
2315:                                    SQLState.LOG_SEGMENT_EXIST, logDir
2316:                                            .getPath());
2317:                        }
2318:                    }
2319:
2320:                } else {
2321:                    // create the log directory.
2322:                    if (!privMkdirs(logDir)) {
2323:                        throw StandardException.newException(
2324:                                SQLState.LOG_SEGMENT_NOT_EXIST, logDir
2325:                                        .getPath());
2326:                    }
2327:                }
2328:            }
2329:
2330:            /*
2331:            	Return the directory the log should go in.
2332:
2333:            	<P> MT- read only
2334:            	@exception StandardException Cloudscape Standard Error Policy
2335:             */
2336:            public StorageFile getLogDirectory() throws StandardException {
2337:                StorageFile logDir = null;
2338:
2339:                logDir = logStorageFactory
2340:                        .newStorageFile(LogFactory.LOG_DIRECTORY_NAME);
2341:
2342:                if (!privExists(logDir)) {
2343:                    throw StandardException.newException(
2344:                            SQLState.LOG_SEGMENT_NOT_EXIST, logDir.getPath());
2345:                }
2346:
2347:                return logDir;
2348:            }
2349:
2350:            public String getCanonicalLogPath() {
2351:                if (logDevice == null)
2352:                    return null;
2353:                else {
2354:                    try {
2355:                        return logStorageFactory.getCanonicalName();
2356:                    } catch (IOException ioe) {
2357:                        return null;
2358:                        // file not found
2359:                    }
2360:                }
2361:            }
2362:
2363:            /**
2364:            	Return the control file name 
2365:
2366:            	<P> MT- read only
2367:             */
2368:            private StorageFile getControlFileName() throws StandardException {
2369:                return logStorageFactory.newStorageFile(getLogDirectory(),
2370:                        "log.ctrl");
2371:            }
2372:
2373:            /**
2374:            	Return the mirror control file name 
2375:
2376:            	<P> MT- read only
2377:             */
2378:            private StorageFile getMirrorControlFileName()
2379:                    throws StandardException {
2380:                return logStorageFactory.newStorageFile(getLogDirectory(),
2381:                        "logmirror.ctrl");
2382:            }
2383:
2384:            /**
2385:            	Given a log file number, return its file name 
2386:
2387:            	<P> MT- read only
2388:             */
2389:            private StorageFile getLogFileName(long filenumber)
2390:                    throws StandardException {
2391:                return logStorageFactory.newStorageFile(getLogDirectory(),
2392:                        "log" + filenumber + ".dat");
2393:            }
2394:
2395:            /*
2396:            	Find a checkpoint log record at the checkpointInstant
2397:
2398:            	<P> MT- read only
2399:             */
2400:            private CheckpointOperation findCheckpoint(long checkpointInstant,
2401:                    FileLogger logger) throws IOException, StandardException,
2402:                    ClassNotFoundException {
2403:                StreamLogScan scan = (StreamLogScan) openForwardsScan(
2404:                        checkpointInstant, (LogInstant) null);
2405:
2406:                // estimated size of a checkpoint log record, which contains 3 longs
2407:                // and assorted other log record overhead
2408:                Loggable lop = logger.readLogRecord(scan, 100);
2409:
2410:                scan.close();
2411:
2412:                if (lop instanceof  CheckpointOperation)
2413:                    return (CheckpointOperation) lop;
2414:                else
2415:                    return null;
2416:            }
2417:
2418:            /*
2419:             * Functions to help the Logger open a log scan on the log.
2420:             */
2421:
2422:            /**
2423:            	Scan backward from start position.
2424:
2425:            	<P> MT- read only
2426:
2427:            	@exception IOException cannot access the log
2428:            	@exception StandardException Standard Cloudscape error policy
2429:             */
2430:            protected LogScan openBackwardsScan(long startAt, LogInstant stopAt)
2431:                    throws IOException, StandardException {
2432:                checkCorrupt();
2433:
2434:                // backward from end of log
2435:                if (startAt == LogCounter.INVALID_LOG_INSTANT)
2436:                    return openBackwardsScan(stopAt);
2437:
2438:                // ensure any buffered data is written to the actual file
2439:                flushBuffer(LogCounter.getLogFileNumber(startAt), LogCounter
2440:                        .getLogFilePosition(startAt));
2441:
2442:                return new Scan(this , startAt, stopAt, Scan.BACKWARD);
2443:            }
2444:
2445:            /**
2446:            	Scan backward from end of log.
2447:            	<P> MT- read only
2448:
2449:            	@exception IOException cannot access the log
2450:            	@exception StandardException Standard Cloudscape error policy
2451:             */
2452:            protected LogScan openBackwardsScan(LogInstant stopAt)
2453:                    throws IOException, StandardException {
2454:                checkCorrupt();
2455:
2456:                // the current instant is the log instant of the next log record to be
2457:                // written out, which is at the end of the log
2458:                // ensure any buffered data is written to the actual file
2459:                long startAt;
2460:                synchronized (this ) {
2461:                    // flush the whole buffer to ensure the complete
2462:                    // end of log is in the file.
2463:                    logOut.flushLogAccessFile();
2464:                    startAt = currentInstant();
2465:                }
2466:
2467:                return new Scan(this , startAt, stopAt,
2468:                        Scan.BACKWARD_FROM_LOG_END);
2469:            }
2470:
2471:            /**
2472:              @see LogFactory#openFlushedScan
2473:              @exception StandardException Ooops.
2474:             */
2475:            public ScanHandle openFlushedScan(DatabaseInstant start,
2476:                    int groupsIWant) throws StandardException {
2477:                return new FlushedScanHandle(this , start, groupsIWant);
2478:            }
2479:
2480:            /**
2481:            	Scan Forward from start position.
2482:
2483:            	<P> MT- read only
2484:
2485:            	@param startAt - if startAt == INVALID_LOG_INSTANT,
2486:            		start from the beginning of the log. Otherwise, start scan from startAt.
2487:            	@param stopAt - if not null, stop at this log instant (inclusive).
2488:            		Otherwise, stop at the end of the log
2489:
2490:            	@exception IOException cannot access the log
2491:            	@exception StandardException Standard Cloudscape error policy
2492:             */
2493:            protected LogScan openForwardsScan(long startAt, LogInstant stopAt)
2494:                    throws IOException, StandardException {
2495:                checkCorrupt();
2496:
2497:                if (startAt == LogCounter.INVALID_LOG_INSTANT) {
2498:                    startAt = firstLogInstant();
2499:                }
2500:
2501:                // ensure any buffered data is written to the actual file
2502:                if (stopAt != null) {
2503:                    LogCounter stopCounter = (LogCounter) stopAt;
2504:                    flushBuffer(stopCounter.getLogFileNumber(), stopCounter
2505:                            .getLogFilePosition());
2506:                } else {
2507:                    synchronized (this ) {
2508:                        if (logOut != null)
2509:                            // flush to the end of the log
2510:                            logOut.flushLogAccessFile();
2511:                    }
2512:                }
2513:
2514:                return new Scan(this , startAt, stopAt, Scan.FORWARD);
2515:            }
2516:
2517:            /*
2518:             * Methods to help a log scan switch from one log file to the next 
2519:             */
2520:
2521:            /**
2522:            	Open a log file and position the file at the beginning.
2523:            	Used by scan to switch to the next log file
2524:
2525:            	<P> MT- read only
2526:
2527:            	@exception StandardException Standard Cloudscape error policy
2528:            	@exception IOException cannot access the log at the new position.
2529:             */
2530:            protected StorageRandomAccessFile getLogFileAtBeginning(
2531:                    long filenumber) throws IOException, StandardException {
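                // A log instant is a single long that packs the log file number together
                // with a byte position inside that file (see LogCounter); starting at
                // LOG_FILE_HEADER_SIZE positions the file just past the per-file header.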
2532:                long instant = LogCounter.makeLogInstantAsLong(filenumber,
2533:                        LOG_FILE_HEADER_SIZE);
2534:                return getLogFileAtPosition(instant);
2535:            }
2536:
2537:            /**
2538:            	Get a read-only handle to the log file positioned at the stated position
2539:
2540:            	<P> MT- read only
2541:
2542:            	@return null if the file does not exist or is of the wrong format
2543:            	@exception IOException cannot access the log at the new position.
2544:            	@exception StandardException Standard Cloudscape error policy
2545:             */
2546:            protected StorageRandomAccessFile getLogFileAtPosition(
2547:                    long logInstant) throws IOException, StandardException {
2548:                checkCorrupt();
2549:
2550:                long filenum = LogCounter.getLogFileNumber(logInstant);
2551:                long filepos = LogCounter.getLogFilePosition(logInstant);
2552:
2553:                StorageFile fileName = getLogFileName(filenum);
2554:                if (!privExists(fileName)) {
2555:                    if (SanityManager.DEBUG) {
2556:                        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
2557:                            SanityManager.DEBUG(LogToFile.DBG_FLAG, fileName
2558:                                    .getPath()
2559:                                    + " does not exist");
2560:                    }
2561:
2562:                    return null;
2563:                }
2564:
2565:                StorageRandomAccessFile log = null;
2566:
2567:                try {
2568:                    log = privRandomAccessFile(fileName, "r");
2569:
2570:                    // verify that the log file is of the right format
2571:                    if (!verifyLogFormat(log, filenum)) {
2572:                        if (SanityManager.DEBUG) {
2573:                            if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
2574:                                SanityManager
2575:                                        .DEBUG(LogToFile.DBG_FLAG, fileName
2576:                                                .getPath()
2577:                                                + " format mismatch");
2578:                        }
2579:
2580:                        log.close();
2581:                        log = null;
2582:                    } else {
2583:                        log.seek(filepos);
2584:                    }
2585:                } catch (IOException ioe) {
2586:                    try {
2587:                        if (log != null) {
2588:                            log.close();
2589:                            log = null;
2590:                        }
2591:
2592:                        if (SanityManager.DEBUG) {
2593:                            SanityManager.THROWASSERT("cannot get to position "
2594:                                    + filepos + " for log file "
2595:                                    + fileName.getPath(), ioe);
2596:                        }
2597:                    } catch (IOException ioe2) {
2598:                    }
2599:                    throw ioe;
2600:                }
2601:
2602:                return log;
2603:
2604:            }
2605:
2606:            /*
2607:             ** Methods of ModuleControl
2608:             */
2609:
2610:            public boolean canSupport(Properties startParams) {
2611:                String runtimeLogAttributes = startParams
2612:                        .getProperty(LogFactory.RUNTIME_ATTRIBUTES);
2613:                if (runtimeLogAttributes != null) {
2614:                    if (runtimeLogAttributes.equals(LogFactory.RT_READONLY))
2615:                        return false;
2616:                }
2617:
2618:                return true;
2619:            }
2620:
2621:            /**
2622:            	Boot up the log factory.
2623:            	<P> MT- caller provide synchronization
2624:
2625:            	@exception StandardException log factory cannot start up
2626:             */
2627:            public void boot(boolean create, Properties startParams)
2628:                    throws StandardException {
2629:                dataDirectory = startParams.getProperty(PersistentService.ROOT);
2630:
2631:                logDevice = startParams.getProperty(Attribute.LOG_DEVICE);
2632:                if (logDevice != null) {
2633:                    // in case the user specifies logDevice in URL form
2634:                    String logDeviceURL = null;
2635:                    try {
2636:                        URL url = new URL(logDevice);
2637:                        logDeviceURL = url.getFile();
2638:                    } catch (MalformedURLException ex) {
2639:                    }
2640:                    if (logDeviceURL != null)
2641:                        logDevice = logDeviceURL;
2642:                }
2643:
2644:                if (create) {
2645:                    getLogStorageFactory();
2646:                    createLogDirectory();
2647:
2648:                } else {
2649:                    // check if the database is being restored from the backup,
2650:                    // if it is then restore the logs.
2651:                    if (!restoreLogs(startParams)) {
2652:                        // set the log storage factory.
2653:                        getLogStorageFactory();
2654:                        if (logDevice != null) {
2655:                            // Make sure we find the log; do not assume
2656:                            // it is OK that the log is not there, because
2657:                            // it could be a user typo (like when users edit
2658:                            // service.properties to change the log device
2659:                            // while restoring from backups using OS copy).
2660:                            StorageFile checklogDir = logStorageFactory
2661:                                    .newStorageFile(LogFactory.LOG_DIRECTORY_NAME);
2662:                            if (!privExists(checklogDir)) {
2663:                                throw StandardException.newException(
2664:                                        SQLState.LOG_FILE_NOT_FOUND,
2665:                                        checklogDir.getPath());
2666:
2667:                            }
2668:                        }
2669:                    }
2670:                }
2671:
2672:                // if the user does not set a valid value for the log buffer size,
2673:                // the default value is used instead.
2674:                logBufferSize = PropertyUtil
2675:                        .getSystemInt(
2676:                                org.apache.derby.iapi.reference.Property.LOG_BUFFER_SIZE,
2677:                                LOG_BUFFER_SIZE_MIN, LOG_BUFFER_SIZE_MAX,
2678:                                DEFAULT_LOG_BUFFER_SIZE);
2679:                jbmsVersion = Monitor.getMonitor().getEngineVersion();
2680:
2681:                String logArchiveMode = startParams
2682:                        .getProperty(Property.LOG_ARCHIVE_MODE);
2683:                logArchived = Boolean.valueOf(logArchiveMode).booleanValue();
2684:
2685:                // get log factory properties, if any are set in derby.properties
2686:                getLogFactoryProperties(null);
2687:
2688:                /* check if the storage factory supports write sync (rws).  If so, use it unless
2689:                 * the derby.storage.fileSyncTransactionLog property is set to true by the user.
2690:                 */
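                // Note: when isWriteSynced ends up true, log files are later opened in
                // "rws" mode (see openLogFileInWriteMode), so each write is pushed to the
                // device synchronously rather than through separate file sync calls.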
2691:
2692:                if (logStorageFactory.supportsRws()) {
2693:                    // write sync can be used in the JVM that the database is running on.
2694:                    // disable write sync if derby.storage.fileSyncTransactionLog is true
2695:                    isWriteSynced = !(PropertyUtil
2696:                            .getSystemBoolean(Property.FILESYNC_TRANSACTION_LOG));
2697:                } else {
2698:                    isWriteSynced = false;
2699:                }
2700:
2701:                // If derby.system.durability=test is set, then set the flag to
2702:                // disable syncing of log records at commit and of the log file before
2703:                // a data page makes it to disk
2704:                if (Property.DURABILITY_TESTMODE_NO_SYNC
2705:                        .equalsIgnoreCase(PropertyUtil
2706:                                .getSystemProperty(Property.DURABILITY_PROPERTY))) {
2707:                    // disable syncing of log.
2708:                    logNotSynced = true;
2709:                    // if the log is not being synced, files shouldn't be opened in write sync mode
2710:                    isWriteSynced = false;
2711:                } else if (Performance.MEASURE) {
2712:                    // development-build-only feature: the Performance.MEASURE variable
2713:                    // must be set by hand and the code rebuilt.  Useful during
2714:                    // development to compare/contrast the effect of syncing; release
2715:                    // users can use the above relaxed durability option to disable
2716:                    // all syncing.
2717:
2718:                    logNotSynced = PropertyUtil
2719:                            .getSystemBoolean(Property.STORAGE_LOG_NOT_SYNCED);
2720:
2721:                    if (logNotSynced) {
2722:                        isWriteSynced = false;
2723:                        Monitor.logMessage("Performance.logNotSynced = true");
2724:                    }
2725:                }
2726:
2727:                // try to access the log
2728:                // if it doesn't exist, create it.
2729:                // if it does exist, run recovery
2730:
2731:                boolean createNewLog = create;
2732:
2733:                if (SanityManager.DEBUG)
2734:                    SanityManager.ASSERT(fid != -1, "invalid log format Id");
2735:
2736:                checkpointInstant = LogCounter.INVALID_LOG_INSTANT;
2737:                try {
2738:                    StorageFile logControlFileName = getControlFileName();
2739:
2740:                    StorageFile logFile;
2741:
2742:                    if (!createNewLog) {
2743:                        if (privExists(logControlFileName)) {
2744:                            checkpointInstant = readControlFile(
2745:                                    logControlFileName, startParams);
2746:
2747:                            // in case the system was previously running with
2748:                            // derby.system.durability=test, print a message
2749:                            // to the derby log
2750:                            if (wasDBInDurabilityTestModeNoSync) {
2751:                                // print a message stating that the database was
2752:                                // previously, at least at one time, running with
2753:                                // derby.system.durability=test mode
2754:                                Monitor
2755:                                        .logMessage(MessageService
2756:                                                .getTextMessage(
2757:                                                        MessageId.LOG_WAS_IN_DURABILITY_TESTMODE_NO_SYNC,
2758:                                                        Property.DURABILITY_PROPERTY,
2759:                                                        Property.DURABILITY_TESTMODE_NO_SYNC));
2760:                            }
2761:
2762:                            if (checkpointInstant == LogCounter.INVALID_LOG_INSTANT
2763:                                    && privExists(getMirrorControlFileName())) {
2764:                                checkpointInstant = readControlFile(
2765:                                        getMirrorControlFileName(), startParams);
2766:                            }
2767:
2768:                        } else if (logDevice != null) {
2769:                            // Do not throw this error if logDevice is null because
2770:                            // in a read only configuration, it is acceptable
2771:                            // to not have a log directory.  But clearly, if the
2772:                            // logDevice property is set, then it should be there.
2773:                            throw StandardException.newException(
2774:                                    SQLState.LOG_FILE_NOT_FOUND,
2775:                                    logControlFileName.getPath());
2776:                        }
2777:
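                        // Recovery starts from the log file that contains the last
                        // checkpoint; if no valid checkpoint instant could be read,
                        // fall back to log file 1.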
2778:                        if (checkpointInstant != LogCounter.INVALID_LOG_INSTANT)
2779:                            logFileNumber = LogCounter
2780:                                    .getLogFileNumber(checkpointInstant);
2781:                        else
2782:                            logFileNumber = 1;
2783:
2784:                        logFile = getLogFileName(logFileNumber);
2785:
2786:                        // if log file is not there or if it is of the wrong format, create a
2787:                        // brand new log file and do not attempt to recover the database
2788:
2789:                        if (!privExists(logFile)) {
2790:                            if (logDevice != null) {
2791:                                throw StandardException.newException(
2792:                                        SQLState.LOG_FILE_NOT_FOUND,
2793:                                        logControlFileName.getPath());
2794:                            }
2795:
2796:                            logErrMsg(MessageService.getTextMessage(
2797:                                    MessageId.LOG_MAYBE_INCONSISTENT, logFile
2798:                                            .getPath()));
2799:
2800:                            createNewLog = true;
2801:                        } else if (!verifyLogFormat(logFile, logFileNumber)) {
2802:                            Monitor.logTextMessage(
2803:                                    MessageId.LOG_DELETE_INCOMPATIBLE_FILE,
2804:                                    logFile);
2805:
2806:                            // blow away the log file if possible
2807:                            if (!privDelete(logFile) && logFileNumber == 1) {
2808:                                logErrMsgForDurabilityTestModeNoSync();
2809:                                throw StandardException.newException(
2810:                                        SQLState.LOG_INCOMPATIBLE_FORMAT,
2811:                                        dataDirectory);
2812:                            }
2813:
2814:                            // If logFileNumber > 1, we are not going to write that 
2815:                            // file just yet.  Just leave it be and carry on.  Maybe 
2816:                            // when we get there it can be deleted.
2817:
2818:                            createNewLog = true;
2819:                        }
2820:                    }
2821:
2822:                    if (createNewLog) {
2823:                        // brand new log.  Start from log file number 1.
2824:
2825:                        // create or overwrite the log control file with an invalid
2826:                        // checkpoint instant since there is no checkpoint yet
2827:                        if (writeControlFile(logControlFileName,
2828:                                LogCounter.INVALID_LOG_INSTANT)) {
2829:                            firstLogFileNumber = 1;
2830:                            logFileNumber = 1;
2831:                            if (SanityManager.DEBUG) {
2832:                                if (SanityManager
2833:                                        .DEBUG_ON(TEST_MAX_LOGFILE_NUMBER)) {
2834:                                    // set the value to be two less than max possible
2835:                                    // log number, test case will perform some ops to 
2836:                                    // hit the max number case.
2837:                                    firstLogFileNumber = LogCounter.MAX_LOGFILE_NUMBER - 2;
2838:
2839:                                    logFileNumber = LogCounter.MAX_LOGFILE_NUMBER - 2;
2840:                                }
2841:                            }
2842:                            logFile = getLogFileName(logFileNumber);
2843:
2844:                            if (privExists(logFile)) {
2845:                                // this log file may be there because the system may have
2846:                                // crashed right after a log switch but did not write
2847:                                // out any log record
2848:                                Monitor.logTextMessage(
2849:                                        MessageId.LOG_DELETE_OLD_FILE, logFile);
2850:
2851:                                if (!privDelete(logFile)) {
2852:                                    logErrMsgForDurabilityTestModeNoSync();
2853:                                    throw StandardException.newException(
2854:                                            SQLState.LOG_INCOMPATIBLE_FORMAT,
2855:                                            dataDirectory);
2856:                                }
2857:                            }
2858:
2859:                            // don't need to try to delete it, we know it isn't there
2860:                            firstLog = privRandomAccessFile(logFile, "rw");
2861:
2862:                            if (!initLogFile(firstLog, logFileNumber,
2863:                                    LogCounter.INVALID_LOG_INSTANT)) {
2864:                                throw StandardException.newException(
2865:                                        SQLState.LOG_SEGMENT_NOT_EXIST, logFile
2866:                                                .getPath());
2867:                            }
2868:
2869:                            endPosition = firstLog.getFilePointer();
2870:                            lastFlush = firstLog.getFilePointer();
2871:
2872:                            // if write sync is true, preallocate the log file
2873:                            // and reopen the file in rws mode.
2874:                            if (isWriteSynced) {
2875:                                // extend the file by writing zeros to it
2876:                                preAllocateNewLogFile(firstLog);
2877:                                firstLog.close();
2878:                                firstLog = openLogFileInWriteMode(logFile);
2879:                                // position the log at the current log end position
2880:                                firstLog.seek(endPosition);
2881:                            }
2882:
2883:                            if (SanityManager.DEBUG) {
2884:                                SanityManager.ASSERT(
2885:                                        endPosition == LOG_FILE_HEADER_SIZE,
2886:                                        "empty log file has wrong size");
2887:                            }
2888:                        } else {
2889:                            // read only database
2890:                            ReadOnlyDB = true;
2891:                            logOut = null;
2892:                            firstLog = null;
2893:                        }
2894:
2895:                        recoveryNeeded = false;
2896:                    } else {
2897:                        // log file exist, need to run recovery
2898:                        recoveryNeeded = true;
2899:                    }
2900:
2901:                } catch (IOException ioe) {
2902:                    throw Monitor.exceptionStartingModule(ioe);
2903:                }
2904:
2905:                // The maximum log file number that can be created in Derby was increased
2906:                // from 2^22 - 1 to 2^31 - 1 in version 10.1. But if the database is running
2907:                // on engine 10.1 or above in a soft upgrade from version 10.0 or
2908:                // before, the maximum log file number that can be created is
2909:                // still limited to 2^22 - 1, because users can revert back to older versions
2910:                // which do not have the logic to handle a log file number greater than
2911:                // 2^22 - 1.
2912:
2913:                // set the maximum possible log file number to the Derby 10.0 limit, if the
2914:                // database is not fully upgraded to, or created in, version 10.1 or above.
2915:                if (!checkVersion(RawStoreFactory.DERBY_STORE_MAJOR_VERSION_10,
2916:                        RawStoreFactory.DERBY_STORE_MINOR_VERSION_1))
2917:                    maxLogFileNumber = LogCounter.DERBY_10_0_MAX_LOGFILE_NUMBER;
2918:
2919:            } // end of boot
2920:
2921:            private void getLogStorageFactory() throws StandardException {
2922:                if (logDevice == null) {
2923:                    DataFactory df = (DataFactory) Monitor.findServiceModule(
2924:                            this , DataFactory.MODULE);
2925:                    logStorageFactory = (WritableStorageFactory) df
2926:                            .getStorageFactory();
2927:                } else {
2928:                    try {
2929:                        PersistentService ps = Monitor.getMonitor()
2930:                                .getServiceType(this );
2931:                        logStorageFactory = (WritableStorageFactory) ps
2932:                                .getStorageFactoryInstance(false, logDevice,
2933:                                        null, null);
2934:                    } catch (IOException ioe) {
2935:                        if (SanityManager.DEBUG)
2936:                            SanityManager.NOTREACHED();
2937:                        throw StandardException.newException(
2938:                                SQLState.LOG_FILE_NOT_FOUND, ioe, logDevice);
2939:                    }
2940:                }
2941:            } // end of getLogStorageFactory
2942:
2943:            /**
2944:                Stop the log factory
2945:            	<P> MT- caller provide synchronization
2946:            	(RESOLVE: this should be called AFTER dataFactory and transFactory are
2947:            	stopped)
2948:             */
2949:            public void stop() {
2950:
2951:                // stop our checkpoint daemon
2952:                if (checkpointDaemon != null) {
2953:                    checkpointDaemon.unsubscribe(myClientNumber);
2954:                    checkpointDaemon.stop();
2955:                }
2956:
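                // Under the factory mutex, mark the factory as stopped and flush any
                // buffered log records before closing the log file.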
2957:                synchronized (this ) {
2958:                    stopped = true;
2959:
2960:                    if (logOut != null) {
2961:                        try {
2962:                            logOut.flushLogAccessFile();
2963:                            logOut.close();
2964:                        } catch (IOException ioe) {
2965:                        } catch (StandardException se) {
2966:                        }
2967:                        logOut = null;
2968:                    }
2969:                }
2970:
2971:                if (SanityManager.DEBUG && Performance.MEASURE
2972:                        && mon_LogSyncStatistics) {
2973:                    Monitor.logMessage("number of times someone waited = "
2974:                            + mon_numLogFlushWaits
2975:                            + "\nnumber of times flush is called = "
2976:                            + mon_flushCalls + "\nnumber of sync is called = "
2977:                            + mon_syncCalls
2978:                            + "\ntotal number of bytes written to log = "
2979:                            + LogAccessFile.mon_numBytesToLog
2980:                            + "\ntotal number of writes to log file = "
2981:                            + LogAccessFile.mon_numWritesToLog);
2982:                }
2983:
2984:                // delete obsolete log files left around by earlier crashes
2985:                if (corrupt == null && !logArchived() && !keepAllLogs
2986:                        && !ReadOnlyDB)
2987:                    deleteObsoleteLogfiles();
2988:
2989:                if (logDevice != null)
2990:                    logStorageFactory.shutdown();
2991:                logStorageFactory = null;
2992:            }
2993:
2994:            /* delete the log files that might have been left around if we crashed
2995:             * immediately after the checkpoint, before truncation of the logs completed.
2996:             * see bug no. 3519 for more details.
2997:             */
2998:
2999:            private void deleteObsoleteLogfiles() {
3000:                StorageFile logDir;
3001:                // find the first log file number that is still useful
3002:                long firstLogNeeded = getFirstLogNeeded(currentCheckpoint);
3003:                if (firstLogNeeded == -1)
3004:                    return;
3005:
3006:                // when a backup is in progress, log files that are yet to
3007:                // be copied to the backup should not be deleted, even
3008:                // if they are not required for crash recovery.
3009:                if (backupInProgress) {
3010:                    long logFileNeededForBackup = logFileToBackup;
3011:                    // if the log file that is yet to be copied
3012:                    // to the backup is earlier than the log file required
3013:                    // for crash recovery, then the first
3014:                    // log file that must be kept is the one
3015:                    // that is yet to be copied to the backup.
3016:                    if (logFileNeededForBackup < firstLogNeeded)
3017:                        firstLogNeeded = logFileNeededForBackup;
3018:                }
3019:
3020:                try {
3021:                    logDir = getLogDirectory();
3022:                } catch (StandardException se) {
3023:                    if (SanityManager.DEBUG)
3024:                        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
3025:                            SanityManager.DEBUG(DBG_FLAG,
3026:                                    "error opening log segment dir");
3027:                    return;
3028:                }
3029:
3030:                String[] logfiles = privList(logDir);
3031:                if (logfiles != null) {
3032:                    StorageFile uselessLogFile = null;
3033:                    long fileNumber;
3034:                    for (int i = 0; i < logfiles.length; i++) {
3035:                        // delete the log files that are not needed any more
3036:                        if (logfiles[i].startsWith("log")
3037:                                && logfiles[i].endsWith(".dat")) {
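                            // extract the file number N from a name of the form
                            // "logN.dat", e.g. "log42.dat" yields 42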
3038:                            fileNumber = Long.parseLong(logfiles[i].substring(
3039:                                    3, (logfiles[i].length() - 4)));
3040:                            if (fileNumber < firstLogNeeded) {
3041:                                uselessLogFile = logStorageFactory
3042:                                        .newStorageFile(logDir, logfiles[i]);
3043:                                if (privDelete(uselessLogFile)) {
3044:                                    if (SanityManager.DEBUG) {
3045:                                        if (SanityManager
3046:                                                .DEBUG_ON(LogToFile.DBG_FLAG))
3047:                                            SanityManager.DEBUG(DBG_FLAG,
3048:                                                    "truncating obsolete log file "
3049:                                                            + uselessLogFile
3050:                                                                    .getPath());
3051:                                    }
3052:                                } else {
3053:                                    if (SanityManager.DEBUG) {
3054:                                        if (SanityManager
3055:                                                .DEBUG_ON(LogToFile.DBG_FLAG))
3056:                                            SanityManager.DEBUG(DBG_FLAG,
3057:                                                    "Fail to truncate obsolete log file "
3058:                                                            + uselessLogFile
3059:                                                                    .getPath());
3060:                                    }
3061:                                }
3062:                            }
3063:                        }
3064:                    }
3065:                }
3066:            }
3067:
3068:            /*
3069:             * Serviceable methods
3070:             */
3071:
3072:            public boolean serviceASAP() {
3073:                return false;
3074:            }
3075:
3076:            // @return true, if this work needs to be done on a user thread immediately
3077:            public boolean serviceImmediately() {
3078:                return false;
3079:            }
3080:
3081:            public void getLogFactoryProperties(PersistentSet set)
3082:                    throws StandardException {
3083:                String lsInterval;
3084:                String cpInterval;
3085:                if (set == null) {
3086:                    lsInterval = PropertyUtil
3087:                            .getSystemProperty(org.apache.derby.iapi.reference.Property.LOG_SWITCH_INTERVAL);
3088:                    cpInterval = PropertyUtil
3089:                            .getSystemProperty(org.apache.derby.iapi.reference.Property.CHECKPOINT_INTERVAL);
3090:                } else {
3091:                    lsInterval = PropertyUtil
3092:                            .getServiceProperty(
3093:                                    set,
3094:                                    org.apache.derby.iapi.reference.Property.LOG_SWITCH_INTERVAL);
3095:                    cpInterval = PropertyUtil
3096:                            .getServiceProperty(
3097:                                    set,
3098:                                    org.apache.derby.iapi.reference.Property.CHECKPOINT_INTERVAL);
3099:                }
3100:
3101:                /* log switch interval */
3102:                if (lsInterval != null) {
3103:                    logSwitchInterval = Integer.parseInt(lsInterval);
3104:                    // make sure checkpoint and log switch interval are within range
3105:                    if (logSwitchInterval < LOG_SWITCH_INTERVAL_MIN)
3106:                        logSwitchInterval = LOG_SWITCH_INTERVAL_MIN;
3107:                    else if (logSwitchInterval > LOG_SWITCH_INTERVAL_MAX)
3108:                        logSwitchInterval = LOG_SWITCH_INTERVAL_MAX;
3109:                }
3110:
3111:                /* checkpoint interval */
3112:                if (cpInterval != null) {
3113:                    checkpointInterval = Integer.parseInt(cpInterval);
3114:                    if (checkpointInterval < CHECKPOINT_INTERVAL_MIN)
3115:                        checkpointInterval = CHECKPOINT_INTERVAL_MIN;
3116:                    else if (checkpointInterval > CHECKPOINT_INTERVAL_MAX)
3117:                        checkpointInterval = CHECKPOINT_INTERVAL_MAX;
3118:                }
3119:            }
3120:
3121:            public int performWork(ContextManager context) {
3122:                synchronized (this ) {
3123:                    if (corrupt != null)
3124:                        return Serviceable.DONE; // don't do this again.
3125:                }
3126:
3127:                // check to see if checkpointInterval and logSwitchInterval have changed
3128:                AccessFactory af = (AccessFactory) Monitor.getServiceModule(
3129:                        this , AccessFactory.MODULE);
3130:
3131:                try {
3132:                    if (af != null) {
3133:                        TransactionController tc = null;
3134:                        try {
3135:                            tc = af.getAndNameTransaction(context,
3136:                                    AccessFactoryGlobals.SYS_TRANS_NAME);
3137:
3138:                            getLogFactoryProperties(tc);
3139:                        } finally {
3140:                            if (tc != null)
3141:                                tc.commit();
3142:                        }
3143:                    }
3144:
3145:                    // checkpoint will start its own internal transaction on the current
3146:                    // context.
3147:                    rawStoreFactory.checkpoint();
3148:                } catch (StandardException se) {
3149:                    Monitor.logTextMessage(MessageId.LOG_CHECKPOINT_EXCEPTION);
3150:                    logErrMsg(se);
3151:                } catch (ShutdownException shutdown) {
3152:                    // If we are shutting down, just ignore the error and let the 
3153:                    // system go down without printing errors to the log.
3154:                }
3155:
3156:                checkpointDaemonCalled = false;
3157:
3158:                return Serviceable.DONE;
3159:            }
3160:
3161:            /*
3162:             ** Implementation specific methods
3163:             */
3164:
3165:            /**
3166:            	Append length bytes of data to the log prepended by a long log instant
3167:            	and followed by 4 bytes of length information.
3168:
3169:            	<P>
3170:            	This method is synchronized to ensure log records are added sequentially
3171:            	to the end of the log.
3172:
3173:            	<P>MT- single threaded through this log factory.  Log records are
3174:            	appended one at a time.
3175:
3176:            	@exception StandardException Log Full.
3177:
3178:             */
3179:            protected long appendLogRecord(byte[] data, int offset, int length,
3180:                    byte[] optionalData, int optionalDataOffset,
3181:                    int optionalDataLength) throws StandardException {
3182:                long instant;
3183:                boolean testIncompleteLogWrite = false;
3184:
3185:                if (ReadOnlyDB) {
3186:                    throw StandardException
3187:                            .newException(SQLState.LOG_READ_ONLY_DB_UPDATE);
3188:                }
3189:
3190:                if (length <= 0) {
3191:                    throw StandardException
3192:                            .newException(SQLState.LOG_ZERO_LENGTH_LOG_RECORD);
3193:                }
3194:
3195:                // RESOLVE: calculate checksum here
3196:                if (SanityManager.DEBUG) {
3197:                    if (SanityManager.DEBUG_ON(TEST_LOG_INCOMPLETE_LOG_WRITE)) {
3198:                        /// /// /// /// /// /// /// /// /// /// 
3199:                        //
3200:                        // go into this alternate route instead
3201:                        //
3202:                        /// /// /// /// /// /// /// /// /// /// 
3203:                        return logtest_appendPartialLogRecord(data, offset,
3204:                                length, optionalData, optionalDataOffset,
3205:                                optionalDataLength);
3206:
3207:                    }
3208:
3209:                }
3210:
3211:                try {
3212:                    if (SanityManager.DEBUG) {
3213:                        if (SanityManager.DEBUG_ON(TEST_LOG_FULL))
3214:                            testLogFull(); // if log is 'full' this routine will throw an
3215:                        // exception 
3216:                    }
3217:
3218:                    synchronized (this ) {
3219:                        // has someone else found a problem in the raw store?
3220:                        if (corrupt != null) {
3221:                            throw StandardException.newException(
3222:                                    SQLState.LOG_STORE_CORRUPT, corrupt);
3223:                        }
3224:
3225:                        if (logOut == null) {
3226:                            throw StandardException
3227:                                    .newException(SQLState.LOG_NULL);
3228:                        }
3229:
3230:                        /*
3231:                         * NOTE!!
3232:                         *
3233:                         * Subclasses which log special records to the stream depend on
3234:                         * the EXACT byte sequence of the following segment of code.
3235:                         * If you change this, not only will you need to write upgrade
3236:                         * code for this class, you also need to find all the subclasses
3237:                         * which write log records to the log stream directly to make sure
3238:                         * they are OK
3239:                         */
3240:
3241:                        // see if the log file is too big; if it is, switch to the next
3242:                        // log file
3243:                        if ((endPosition + LOG_RECORD_OVERHEAD + length) >= LogCounter.MAX_LOGFILE_SIZE) {
3244:                            switchLogFile();
3245:
3246:                            // still too big??  Giant log record?
3247:                            if ((endPosition + LOG_RECORD_OVERHEAD + length) >= LogCounter.MAX_LOGFILE_SIZE) {
3248:                                throw StandardException.newException(
3249:                                        SQLState.LOG_EXCEED_MAX_LOG_FILE_SIZE,
3250:                                        new Long(logFileNumber), new Long(
3251:                                                endPosition), new Long(length),
3252:                                        new Long(LogCounter.MAX_LOGFILE_SIZE));
3253:                            }
3254:                        }
3255:
3256:                        //reserve the space for the checksum log record
3257:                        endPosition += logOut.reserveSpaceForChecksum(length,
3258:                                logFileNumber, endPosition);
3259:
3260:                        // don't call currentInstant since we are already in a
3261:                        // synchronized block 
3262:                        instant = LogCounter.makeLogInstantAsLong(
3263:                                logFileNumber, endPosition);
3264:
3265:                        logOut.writeLogRecord(length, instant, data, offset,
3266:                                optionalData, optionalDataOffset,
3267:                                optionalDataLength);
3268:
3269:                        if (optionalDataLength != 0) {
3270:                            if (SanityManager.DEBUG) {
3271:                                if (optionalData == null)
3272:                                    SanityManager
3273:                                            .THROWASSERT("optionalDataLength = "
3274:                                                    + optionalDataLength
3275:                                                    + " with null Optional data");
3276:
3277:                                if (optionalData.length < (optionalDataOffset + optionalDataLength))
3278:                                    SanityManager
3279:                                            .THROWASSERT("optionalDataLength = "
3280:                                                    + optionalDataLength
3281:                                                    + " optionalDataOffset = "
3282:                                                    + optionalDataOffset
3283:                                                    + " optionalData.length = "
3284:                                                    + optionalData.length);
3285:                            }
3286:                        }
3287:
3288:                        endPosition += (length + LOG_RECORD_OVERHEAD);
3289:                    }
3290:                } catch (IOException ioe) {
3291:                    throw markCorrupt(StandardException.newException(
3292:                            SQLState.LOG_FULL, ioe));
3293:                }
3294:
3295:                return instant;
3296:            }
3297:
3298:            /*
3299:             * Misc private functions to access the log
3300:             */
3301:
3302:            /**
3303:            	Get the current log instant - this is the log instant of the next log
3304:            	record to be written out.
3305:            	<P> MT - This method is synchronized to ensure that it always points to
3306:            	the end of a log record, not the middle of one. 
3307:             */
3308:            protected synchronized long currentInstant() {
3309:                return LogCounter.makeLogInstantAsLong(logFileNumber,
3310:                        endPosition);
3311:            }
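            // The instant returned above packs a log file number and a byte position
            // into one long.  A minimal sketch of such an encoding, assuming a
            // hypothetical layout with the file number in the high 32 bits
            // (LogCounter's actual bit layout may differ):
            //
            //     static long makeInstantSketch(long fileNumber, long position) {
            //         return (fileNumber << 32) | (position & 0xFFFFFFFFL);
            //     }
            //
            //     static long fileNumberSketch(long instant) {
            //         return instant >>> 32;            // recover the file number
            //     }
            //
            //     static long positionSketch(long instant) {
            //         return instant & 0xFFFFFFFFL;     // recover the byte position
            //     }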
3312:
3313:            protected synchronized long endPosition() {
3314:                return endPosition;
3315:            }
3316:
3317:            /**
3318:            	Return the current log file number.
3319:
3320:            	<P> MT - this method is synchronized so that
3321:            	it is not in the middle of being changed by switchLogFile
3322:             */
3323:            private synchronized long getLogFileNumber() {
3324:                return logFileNumber;
3325:            }
3326:
3327:            /** 
3328:            	Get the first valid log instant - this is the beginning of the first
3329:            	log file
3330:
3331:            	<P>MT- synchronized on this
3332:             */
3333:            private synchronized long firstLogInstant() {
3334:                return LogCounter.makeLogInstantAsLong(firstLogFileNumber,
3335:                        LOG_FILE_HEADER_SIZE);
3336:            }
3337:
3338:            /**
3339:            	Flush the log such that the log record written with the instant 
3340:                wherePosition is guaranteed to be on disk.
3341:
3342:            	<P>MT - only one flush is allowed to be taking place at any given time
3343:            	(RESOLVE: right now it single-threads through the log factory while the
3344:            	log is frozen)
3345:
3346:            	@exception StandardException cannot sync log file
3347:
3348:             */
3349:            protected void flush(long fileNumber, long wherePosition)
3350:                    throws StandardException {
3351:
3352:                long potentialLastFlush = 0;
3353:
3354:                synchronized (this ) {
3355:                    if (Performance.MEASURE)
3356:                        mon_flushCalls++;
3357:                    try {
3358:                        boolean waited;
3359:                        do {
3360:                            // THIS CORRUPT CHECK MUST BE FIRST, before any check that
3361:                            // sees if the log has already been flushed to this
3362:                            // point. This is based upon the assumption that every
3363:                            // dirty page in the cache must call flush() before it is
3364:                            // written out.  Has someone else found a problem in the
3365:                            // raw store?
3366:
3367:                            if (corrupt != null) {
3368:                                throw StandardException.newException(
3369:                                        SQLState.LOG_STORE_CORRUPT, corrupt);
3370:                            }
3371:
3372:                            // now check if database is frozen
3373:                            while (isFrozen) {
3374:                                try {
3375:                                    wait();
3376:                                } catch (InterruptedException ie) {
3377:                                    throw StandardException.interrupt(ie);
3378:                                }
3379:                            }
3380:
3381:                            // if we are just testing to see if the database is 
3382:                            // frozen or corrupt (wherePosition == INVALID_LOG_INSTANT)
3383:                            // then we can return now.
3384:                            // if the log file is already flushed up to where we are 
3385:                            // interested in, just return.
3386:                            if (wherePosition == LogCounter.INVALID_LOG_INSTANT
3387:                                    || fileNumber < logFileNumber
3388:                                    || wherePosition < lastFlush) {
3389:                                return;
3390:                            }
3391:
3392:                            // if we are not corrupt and we are in the middle of redo, 
3393:                            // we know the log record has already been flushed since we haven't written any log 
3394:                            // yet.
3395:                            if (recoveryNeeded && inRedo) {
3396:                                return;
3397:                            }
3398:
3399:                            if (SanityManager.DEBUG) {
3400:                                if (fileNumber > getLogFileNumber())
3401:                                    SanityManager
3402:                                            .THROWASSERT("trying to flush a file that is not there yet "
3403:                                                    + fileNumber
3404:                                                    + " "
3405:                                                    + logFileNumber);
3406:
3407:                                if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG)) {
3408:                                    SanityManager.DEBUG(DBG_FLAG,
3409:                                            "Flush log to " + wherePosition);
3410:                                }
3411:                            }
3412:
3413:                            // There could be multiple threads that want to flush the
3414:                            // log file; see if I can be the one.
3415:                            if (logBeingFlushed) {
3416:                                waited = true;
3417:                                try {
3418:                                    if (Performance.MEASURE)
3419:                                        mon_numLogFlushWaits++;
3420:                                    wait(); // release log semaphore to let non-flushing
3421:                                    // threads log stuff while all the flushing 
3422:                                    // threads wait.
3423:
3424:                                    // now we continue back to see if the sync
3425:                                    // we waited for, flushed the portion
3426:                                    // of log we are interested in.
3427:                                } catch (InterruptedException ie) {
3428:                                    throw StandardException.interrupt(ie);
3429:                                }
3430:                            } else {
3431:                                waited = false;
3432:
3433:                                // logBeingFlushed is false, I am flushing the log now.
3434:                                if (!isWriteSynced) {
3435:                                    // Flush any data from the buffered log
3436:                                    logOut.flushLogAccessFile();
3437:                                } else {
3438:                                    //add active buffers to dirty buffer list
3439:                                    //to flush to the disk.
3440:                                    logOut.switchLogBuffer();
3441:                                }
3442:
3443:                                potentialLastFlush = endPosition; // we will flush to the end
3444:
3445:                                // once logBeingFlushed is set, need to release
3446:                                // the logBeingFlushed flag in finally block.
3447:                                logBeingFlushed = true;
3448:                            }
3449:
3450:                        } while (waited);
3451:                        // if I have waited, go around the do loop again - hopefully,
3452:                        // someone else has already flushed it for me.
3453:                    } catch (IOException ioe) {
3454:                        throw markCorrupt(StandardException.newException(
3455:                                SQLState.LOG_CANNOT_FLUSH, ioe, getLogFileName(
3456:                                        logFileNumber).getPath()));
3457:                    }
3458:                } // unfreeze log manager to accept more log records
3459:
3460:                boolean syncSuceed = false;
3461:                try {
3462:                    if (SanityManager.DEBUG) {
3463:                        SanityManager.ASSERT(logBeingFlushed,
3464:                                "flushing log without logBeingFlushed set");
3465:                        SanityManager.ASSERT(potentialLastFlush > 0,
3466:                                "potentialLastFlush not set");
3467:
3468:                        if (SanityManager.DEBUG_ON(TEST_LOG_FULL))
3469:                            testLogFull();
3470:
3471:                        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
3472:                            SanityManager.DEBUG(DBG_FLAG, "Begin log sync...");
3473:                    }
3474:
3475:                    if (Performance.MEASURE)
3476:                        mon_syncCalls++;
3477:
3478:                    if (isWriteSynced) {
3479:                        //LogAccessFile.flushDirtyBuffers() will allow only one write
3480:                        //sync at a time, flush requests will get queued 
3481:                        logOut.flushDirtyBuffers();
3482:                    } else {
3483:                        if (!logNotSynced)
3484:                            logOut.syncLogAccessFile();
3485:                    }
3486:
3487:                    syncSuceed = true;
3488:
3489:                    if (SanityManager.DEBUG) {
3490:                        if (SanityManager.DEBUG_ON(LogToFile.DBG_FLAG))
3491:                            SanityManager.DEBUG(DBG_FLAG, "end log sync.");
3492:                    }
3493:                } catch (SyncFailedException sfe) {
3494:                    throw markCorrupt(StandardException.newException(
3495:                            SQLState.LOG_CANNOT_FLUSH, sfe, getLogFileName(
3496:                                    logFileNumber).getPath()));
3497:                } catch (IOException ioe) {
3498:                    throw markCorrupt(StandardException.newException(
3499:                            SQLState.LOG_CANNOT_FLUSH, ioe, getLogFileName(
3500:                                    logFileNumber).getPath()));
3501:                } finally {
3502:                    synchronized (this ) {
3503:                        logBeingFlushed = false; // done flushing
3504:
3505:                        // update lastFlush under synchronized this instead of synchronized(logOut)
3506:                        if (syncSuceed) {
3507:                            lastFlush = potentialLastFlush;
3508:                        }
3509:
3510:                        // We may actually have flushed more than that because someone
3511:                        // may have done a logOut.flushBuffer right before the sync
3512:                        // call. But this is guaranteed to be flushed.
3513:                        notifyAll();
3514:                    }
3515:                }
3516:
3517:                // get checkpoint Daemon to work
3518:                if ((logWrittenFromLastCheckPoint + potentialLastFlush) > checkpointInterval
3519:                        && checkpointDaemon != null
3520:                        && !checkpointDaemonCalled
3521:                        && !inLogSwitch) {
3522:                    // following synchronized block is required to make 
3523:                    // sure only one checkpoint request gets scheduled.
3524:                    synchronized (this ) {
3525:                        // recheck if checkpoint is still required, it is possible some other
3526:                        // thread might have already scheduled a checkpoint and completed it. 
3527:                        if ((logWrittenFromLastCheckPoint + potentialLastFlush) > checkpointInterval
3528:                                && checkpointDaemon != null
3529:                                && !checkpointDaemonCalled && !inLogSwitch) {
3530:                            checkpointDaemonCalled = true;
3531:                            checkpointDaemon.serviceNow(myClientNumber);
3532:                        }
3533:                    }
3534:
3535:                } else {
3536:                    // switch the log if required, this case will occur 
3537:                    // if the log switch interval is less than the checkpoint interval;
3538:                    // otherwise, the checkpoint daemon would be doing log switches along
3539:                    // with the checkpoints.
3540:                    if (potentialLastFlush > logSwitchInterval
3541:                            && !checkpointDaemonCalled && !inLogSwitch) {
3542:                        // following synchronized block is required to make sure only
3543:                        // one thread switches the log file at a time.
3544:                        synchronized (this ) {
3545:                            // recheck if log switch is still required, it is possible some other
3546:                            // thread might have already switched the log file. 
3547:                            if (potentialLastFlush > logSwitchInterval
3548:                                    && !checkpointDaemonCalled && !inLogSwitch) {
3549:                                inLogSwitch = true;
3550:                                switchLogFile();
3551:                            }
3552:                        }
3553:                    }
3554:                }
3555:            }
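            // flush() above coordinates concurrent callers with a single-flusher
            // pattern: whoever finds logBeingFlushed clear performs the expensive sync
            // outside the monitor while everyone else waits and then re-checks.  A
            // minimal, generic sketch of that pattern (hypothetical names, not the
            // actual Derby implementation):
            //
            //     private boolean beingFlushed = false;
            //     private long flushedUpTo = 0;
            //
            //     void flushSketch(long upTo) throws InterruptedException, IOException {
            //         long target;
            //         synchronized (this) {
            //             while (true) {
            //                 if (upTo <= flushedUpTo)
            //                     return;               // someone already flushed past us
            //                 if (!beingFlushed) {
            //                     beingFlushed = true;  // we become the flusher
            //                     target = bufferEnd(); // hypothetical: end of written log
            //                     break;
            //                 }
            //                 wait();                   // let the current flusher finish
            //             }
            //         }
            //         boolean synced = false;
            //         try {
            //             syncToDisk();                 // hypothetical: the expensive sync
            //             synced = true;
            //         } finally {
            //             synchronized (this) {
            //                 beingFlushed = false;
            //                 if (synced)
            //                     flushedUpTo = target;
            //                 notifyAll();              // wake waiters to re-check
            //             }
            //         }
            //     }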
3556:
3557:            /**
3558:             * Utility routine to call sync() on the input file descriptor.
3559:             * <p> 
3560:             */
3561:            private void syncFile(StorageRandomAccessFile raf)
3562:                    throws StandardException {
3563:                for (int i = 0;;) {
3564:                    // 3311: JVM sync call sometimes fails under high load against an
3565:                    // NFS-mounted disk.  We retry up to 20 times.
3566:                    try {
3567:                        raf.sync(false);
3568:
3569:                        // the sync succeeded, so return
3570:                        break;
3571:                    } catch (IOException ioe) {
3572:                        i++;
3573:                        try {
3574:                            // wait for .2 of a second, hopefully I/O is done by now
3575:                            // we wait a max of 4 seconds before we give up
3576:                            Thread.sleep(200);
3577:                        } catch (InterruptedException ie) {
3578:                            // does not matter whether I get interrupted or not
3579:                        }
3580:
3581:                        if (i > 20) {
3582:                            throw StandardException.newException(
3583:                                    SQLState.LOG_FULL, ioe);
3584:                        }
3585:                    }
3586:                }
3587:            }
3588:
3589:            /**
3590:              Open a forward scan of the transaction log.
3591:
3592:              <P> MT- read only
3593:              @exception StandardException  Standard Cloudscape exception policy
3594:             */
3595:            public LogScan openForwardsFlushedScan(LogInstant startAt)
3596:                    throws StandardException {
3597:                checkCorrupt();
3598:
3599:                // no need to flush the buffer as it's a flushed scan
3600:
3601:                return new FlushedScan(this , ((LogCounter) startAt)
3602:                        .getValueAsLong());
3603:            }
3604:
3605:            /**
3606:              Get a forwards scan
3607:
3608:              @exception StandardException Standard Cloudscape error policy
3609:             */
3610:            public LogScan openForwardsScan(LogInstant startAt,
3611:                    LogInstant stopAt) throws StandardException {
3612:                try {
3613:                    long startLong;
3614:
3615:                    if (startAt == null)
3616:                        startLong = LogCounter.INVALID_LOG_INSTANT;
3617:                    else
3618:                        startLong = ((LogCounter) startAt).getValueAsLong();
3619:
3620:                    return openForwardsScan(startLong, stopAt);
3621:                }
3622:
3623:                catch (IOException ioe) {
3624:                    throw markCorrupt(StandardException.newException(
3625:                            SQLState.LOG_IO_ERROR, ioe));
3626:                }
3627:
3628:            }
3629:
3630:            public final boolean databaseEncrypted() {
3631:                return databaseEncrypted;
3632:            }
3633:
3634:            /*
3635:             * Mark the database as encrypted: all of the transaction log has
3636:             * to be encrypted, and flush the log if requested. The log needs to
3637:             * be flushed first if this is being set during (re)encryption
3638:             * of an existing database.
3639:             *
3640:             * @param flushLog  true if the log needs to be flushed,
3641:             *                  otherwise false.
3642:             */
3643:            public void setDatabaseEncrypted(boolean flushLog)
3644:                    throws StandardException {
3645:                if (flushLog)
3646:                    flushAll();
3647:                databaseEncrypted = true;
3648:            }
3649:
3650:            /*
3651:             * Set up a new log file so that
3652:             * log records are written into the new log file
3653:             * after this call.
3654:             *
3655:             * <P>MT - synchronization provided by caller - RawStore boot.
3656:             * This method is called while re-encrypting the database
3657:             * at database boot time.
3658:             */
3659:            public void startNewLogFile() throws StandardException {
3660:                // switch the database to a new log file.
3661:                switchLogFile();
3662:            }
3663:
3664:            /*
3665:             * Find out whether the checkpoint is in the last log file.
3666:             *
3667:             * <P>MT - synchronization provided by caller - RawStore boot.
3668:             * This method is called only if a crash occurred while
3669:             * re-encrypting the database at boot time.
3670:             * @return <code> true </code> if the checkpoint is
3671:             *                in the last log file, otherwise
3672:             *                 <code> false </code>.
3673:             */
3674:            public boolean isCheckpointInLastLogFile() throws StandardException {
3675:                // check if the checkpoint is done in the last log file. 
3676:                long logFileNumberAfterCheckpoint = LogCounter
3677:                        .getLogFileNumber(checkpointInstant) + 1;
3678:
3679:                // check if there is a log file after
3680:                // the log file that has the last 
3681:                // checkpoint record.
3682:                StorageFile logFileAfterCheckpoint = getLogFileName(logFileNumberAfterCheckpoint);
3683:                // System.out.println("checking " + logFileAfterCheckpoint);
3684:                if (privExists(logFileAfterCheckpoint))
3685:                    return false;
3686:                else
3687:                    return true;
3688:            }
3689:
3690:            /*
3691:             * delete the log file after the checkpoint. 
3692:             *
3693:             * <P>MT - synchronization provided by caller - RawStore boot,
3694:             * This method is called only if a crash occurred while 
3695:             * re-encrypting the database at boot time. 
3696:             */
3697:            public void deleteLogFileAfterCheckpointLogFile()
3698:                    throws StandardException {
3699:                long logFileNumberAfterCheckpoint = LogCounter
3700:                        .getLogFileNumber(checkpointInstant) + 1;
3701:
3702:                StorageFile logFileAfterCheckpoint = getLogFileName(logFileNumberAfterCheckpoint);
3703:
3704:                // System.out.println("deleting " + logFileAfterCheckpoint);
3705:
3706:                if (privExists(logFileAfterCheckpoint)) {
3707:                    // delete the log file (this must have been encrypted
3708:                    // with the new key).
3709:                    if (!privDelete(logFileAfterCheckpoint)) {
3710:                        // throw exception, recovery cannot be performed
3711:                        // without deleting the log file encrypted with the new key.
3712:                        throw StandardException.newException(
3713:                                SQLState.UNABLE_TO_DELETE_FILE,
3714:                                logFileAfterCheckpoint);
3715:                    }
3716:                }
3717:            }
3718:
3719:            /**
3720:            	@see RawStoreFactory#encrypt
3721:            	@exception StandardException Standard Cloudscape Error Policy
3722:             */
3723:            public int encrypt(byte[] cleartext, int offset, int length,
3724:                    byte[] ciphertext, int outputOffset)
3725:                    throws StandardException {
3726:                return rawStoreFactory.encrypt(cleartext, offset, length,
3727:                        ciphertext, outputOffset, false);
3728:            }
3729:
3730:            /**
3731:            	@see RawStoreFactory#decrypt
3732:            	@exception StandardException Standard Cloudscape Error Policy
3733:             */
3734:            public int decrypt(byte[] ciphertext, int offset, int length,
3735:                    byte[] cleartext, int outputOffset)
3736:                    throws StandardException {
3737:                return rawStoreFactory.decrypt(ciphertext, offset, length,
3738:                        cleartext, outputOffset);
3739:            }
3740:
3741:            /**
3742:            	return the encryption block size used during encrypted db creation
3743:             */
3744:            public int getEncryptionBlockSize() {
3745:                return rawStoreFactory.getEncryptionBlockSize();
3746:            }
3747:
3748:            /**
3749:               Returns the length that will make the data a multiple of the encryption
3750:               block size, based on the given length. Block cipher algorithms such as DES
3751:               and Blowfish require their input to be an exact multiple of the block size.
3752:             */
3753:            public int getEncryptedDataLength(int length) {
3754:                if ((length % getEncryptionBlockSize()) != 0) {
3755:                    return length + getEncryptionBlockSize()
3756:                            - (length % getEncryptionBlockSize());
3757:                }
3758:
3759:                return length;
3760:            }
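            // A short worked example of the rounding above, assuming an 8-byte
            // encryption block size:
            //
            //     // length 13:  13 % 8 = 5, so return 13 + 8 - 5 = 16
            //     // length 16:  16 % 8 = 0, so return 16 unchanged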
3761:
3762:            /**
3763:              Get the instant of the first record which was not
3764:              flushed.
3765:
3766:              <P>This only works after running recovery the first time.
3767:              <P>MT - RESOLVE:
3768:             */
3769:            public synchronized LogInstant getFirstUnflushedInstant() {
3770:                if (SanityManager.DEBUG)
3771:                    SanityManager.ASSERT(logFileNumber > 0 && lastFlush > 0);
3772:
3773:                return new LogCounter(logFileNumber, lastFlush);
3774:            }
3775:
3776:            /**
3777:             * Backup restore - stop sending log records to the log stream
3778:             * @exception StandardException Standard Cloudscape error policy
3779:             */
3780:            public void freezePersistentStore() throws StandardException {
3781:                // if I get into this synchronized block, I know I am not in the middle
3782:                // of a write because writing to the log file is synchronized under this.
3783:                synchronized (this ) {
3784:                    isFrozen = true;
3785:                }
3786:            }
3787:
3788:            /**
3789:             * Backup restore - start sending log records to the log stream
3790:             * @exception StandardException Standard Cloudscape error policy
3791:             */
3792:            public void unfreezePersistentStore() throws StandardException {
3793:                synchronized (this ) {
3794:                    isFrozen = false;
3795:                    notifyAll();
3796:                }
3797:            }
3798:
3799:            /**
3800:             * Backup restore - is the log being archived to some directory?
3801:             * Returns true if log archive mode is enabled, false otherwise.
3802:             */
3803:            public boolean logArchived() {
3804:                return logArchived;
3805:            }
3806:
3807:            /**
3808:               Check to see if a database has been upgraded to the required
3809:               level in order to use a store feature.
3810:               @param requiredMajorVersion  required database Engine major version
3811:               @param requiredMinorVersion  required database Engine minor version
3812:               @return True if the database has been upgraded to the required level, false otherwise.
3813:             **/
3814:            boolean checkVersion(int requiredMajorVersion,
3815:                    int requiredMinorVersion) {
3816:                if (onDiskMajorVersion > requiredMajorVersion) {
3817:                    return true;
3818:                } else {
3819:                    if (onDiskMajorVersion == requiredMajorVersion
3820:                            && onDiskMinorVersion >= requiredMinorVersion)
3821:                        return true;
3822:                }
3823:
3824:                return false;
3825:            }
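            // A quick sketch of the comparison above with hypothetical version numbers:
            //
            //     // on-disk 10.3, required (10, 2):  major equal, minor 3 >= 2  ->  true
            //     // on-disk 11.0, required (10, 2):  major 11 > 10              ->  true
            //     // on-disk 10.1, required (10, 2):  major equal, minor 1 < 2   ->  false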
3826:
3827:            /**
3828:             *  Check to see if a database has been upgraded to the required
3829:             *  level in order to use a store feature.
3830:             *
3831:             * @param requiredMajorVersion  required database Engine major version
3832:             * @param requiredMinorVersion  required database Engine minor version
3833:             * @param feature Non-null to throw an exception, null to return the 
3834:             *                state of the version match.
3835:             * @return <code> true </code> if the database has been upgraded to 
3836:             *         the required level, <code> false </code> otherwise.
3837:             * @exception  StandardException 
3838:             *             if the database is not at the require version 
3839:             *             when <code>feature</code> feature is 
3840:             *             not <code> null </code>. 
3841:             */
3842:            public boolean checkVersion(int requiredMajorVersion,
3843:                    int requiredMinorVersion, String feature)
3844:                    throws StandardException {
3845:
3846:                boolean isRequiredVersion = checkVersion(requiredMajorVersion,
3847:                        requiredMinorVersion);
3848:
3849:                // if the database is not at the required version, throw an exception
3850:                // if the feature is non-null.
3851:                if (!isRequiredVersion && feature != null) {
3852:                    throw StandardException.newException(
3853:                            SQLState.LANG_STATEMENT_UPGRADE_REQUIRED, feature,
3854:                            ProductVersionHolder.simpleVersionString(
3855:                                    onDiskMajorVersion, onDiskMinorVersion,
3856:                                    onDiskBeta), ProductVersionHolder
3857:                                    .simpleVersionString(requiredMajorVersion,
3858:                                            requiredMinorVersion, false));
3859:                }
3860:
3861:                return isRequiredVersion;
3862:            }
3863:
3864:            /*
3865:             ** Sending information to the user without throwing an exception.
3866:             ** There are times when unusual external or system-related things happen in
3867:             ** the log which the user may be interested in but which do not impede
3868:             ** the normal operation of the store.  When such an event occurs, just send
3869:             ** a message or a warning message to the user rather than throw an
3870:             ** exception, which would roll back a user transaction or crash the database.
3871:             **
3872:             ** logErrMsg - sends a warning message to the user 
3873:             */
3874:
3875:            /**
3876:            	Print error message to user about the log
3877:            	MT - not needed, informational only
3878:             */
3879:            protected void logErrMsg(String msg) {
3880:                logErrMsgForDurabilityTestModeNoSync();
3881:                Monitor.logTextMessage(MessageId.LOG_BEGIN_ERROR);
3882:                Monitor.logMessage(msg);
3883:                Monitor.logTextMessage(MessageId.LOG_END_ERROR);
3884:            }
3885:
3886:            /**
3887:            	Print error message to user about the log
3888:            	MT - not needed, informational only
3889:             */
3890:            protected void logErrMsg(Throwable t) {
3891:                logErrMsgForDurabilityTestModeNoSync();
3892:                if (corrupt != null) {
3893:                    Monitor.logTextMessage(MessageId.LOG_BEGIN_CORRUPT_STACK);
3894:                    printErrorStack(corrupt);
3895:                    Monitor.logTextMessage(MessageId.LOG_END_CORRUPT_STACK);
3896:                }
3897:
3898:                if (t != corrupt) {
3899:                    Monitor.logTextMessage(MessageId.LOG_BEGIN_ERROR_STACK);
3900:                    printErrorStack(t);
3901:                    Monitor.logTextMessage(MessageId.LOG_END_ERROR_STACK);
3902:                }
3903:            }
3904:
3905:            /**
3906:             * In case of boot errors, if the database is either booted
3907:             * with derby.system.durability=test or was previously at any time booted in
3908:             * this mode, mention in the error message that the error is probably
3909:             * because derby.system.durability was set.
3910:             * We don't want to waste time resolving issues in such
3911:             * cases.
3912:             * <p>
3913:             * MT - not needed, informational only
3914:             */
3915:            private void logErrMsgForDurabilityTestModeNoSync() {
3916:                if (logNotSynced || wasDBInDurabilityTestModeNoSync) {
3917:                    Monitor.logTextMessage(
3918:                            MessageId.LOG_DURABILITY_TESTMODE_NO_SYNC_ERR,
3919:                            Property.DURABILITY_PROPERTY,
3920:                            Property.DURABILITY_TESTMODE_NO_SYNC);
3921:                }
3922:            }
3923:
3924:            /**
3925:             * print stack trace from the Throwable including
3926:             * its nested exceptions 
3927:             * @param t trace starts from this error
3928:             */
3929:            private void printErrorStack(Throwable t) {
3930:                ErrorStringBuilder esb = new ErrorStringBuilder(Monitor
3931:                        .getStream().getHeader());
3932:                esb.stackTrace(t);
3933:                Monitor.logMessage(esb.get().toString());
3934:                esb.reset();
3935:            }
3936:
3937:            /**
3938:             *  Testing support
3939:             */
3940:            /** 
3941:            	Writes out a partial log record - takes the same arguments as appendLogRecord.
3942:            	Need to shut down the database before another log record gets written,
3943:            	or the database is not recoverable.
3944:             */
3945:            private long logtest_appendPartialLogRecord(byte[] data,
3946:                    int offset, int length, byte[] optionalData,
3947:                    int optionalDataOffset, int optionalDataLength)
3948:                    throws StandardException {
3949:                if (SanityManager.DEBUG) {
3950:                    int bytesToWrite = 1;
3951:
3952:                    String TestPartialLogWrite = PropertyUtil
3953:                            .getSystemProperty(TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES);
3954:                    if (TestPartialLogWrite != null) {
3955:                        bytesToWrite = Integer.valueOf(TestPartialLogWrite)
3956:                                .intValue();
3957:                    }
3958:
3959:                    Monitor
3960:                            .logMessage("TEST_LOG_INCOMPLETE_LOG_WRITE: writing "
3961:                                    + bytesToWrite
3962:                                    + " bytes out of "
3963:                                    + length
3964:                                    + " + "
3965:                                    + LOG_RECORD_OVERHEAD
3966:                                    + " log record");
3967:
3968:                    long instant;
3969:                    try {
3970:
3971:                        synchronized (this ) {
3972:                            // reserve the space for the checksum log record
3973:                            // NOTE:  bytesToWrite includes the log record overhead.
3974:                            endPosition += logOut
3975:                                    .reserveSpaceForChecksum(
3976:                                            ((length + LOG_RECORD_OVERHEAD) < bytesToWrite ? length
3977:                                                    : (bytesToWrite - LOG_RECORD_OVERHEAD)),
3978:                                            logFileNumber, endPosition);
3979:                            instant = currentInstant();
3980:
3981:                            //check if the length of the records to be written is 
3982:                            //actually smaller than the number of bytesToWrite 
3983:                            if (length + LOG_RECORD_OVERHEAD < bytesToWrite)
3984:                                endPosition += (length + LOG_RECORD_OVERHEAD);
3985:                            else
3986:                                endPosition += bytesToWrite;
3987:
3988:                            while (true) // so we can break out without returning out of
3989:                            // sync block...
3990:                            {
3991:                                if (bytesToWrite < 4) {
3992:                                    int shift = 3;
3993:                                    while (bytesToWrite-- > 0) {
3994:                                        logOut
3995:                                                .write((byte) ((length >>> 8 * shift) & 0xFF));
3996:                                        shift--;
3997:                                    }
3998:                                    break;
3999:                                }
4000:
4001:                                // the length before the log record
4002:                                logOut.writeInt(length);
4003:                                bytesToWrite -= 4;
4004:
4005:                                if (bytesToWrite < 8) {
4006:                                    int shift = 7;
4007:                                    while (bytesToWrite-- > 0) {
4008:                                        logOut
4009:                                                .write((byte) ((instant >>> 8 * shift) & 0xFF));
4010:                                        shift--;
4011:                                    }
4012:                                    break;
4013:                                }
4014:
4015:                                // the log instant
4016:                                logOut.writeLong(instant);
4017:                                bytesToWrite -= 8;
4018:
4019:                                if (bytesToWrite < length) {
4020:                                    int dataLength = length
4021:                                            - optionalDataLength;
4022:                                    if (bytesToWrite < dataLength)
4023:                                        logOut
4024:                                                .write(data, offset,
4025:                                                        bytesToWrite);
4026:                                    else {
4027:                                        logOut.write(data, offset, dataLength);
4028:                                        bytesToWrite -= dataLength;
4029:                                        if (optionalDataLength != 0
4030:                                                && bytesToWrite > 0)
4031:                                            logOut.write(optionalData,
4032:                                                    optionalDataOffset,
4033:                                                    bytesToWrite);
4034:                                    }
4035:                                    break;
4036:                                }
4037:
4038:                                // the log data
4039:                                logOut.write(data, offset, length
4040:                                        - optionalDataLength);
4041:                                //write optional data
4042:                                if (optionalDataLength != 0)
4043:                                    logOut.write(optionalData,
4044:                                            optionalDataOffset,
4045:                                            optionalDataLength);
4046:
4047:                                bytesToWrite -= length;
4048:
4049:                                if (bytesToWrite < 4) {
4050:                                    int shift = 3;
4051:                                    while (bytesToWrite-- > 0) {
4052:                                        logOut
4053:                                                .write((byte) ((length >>> 8 * shift) & 0xFF));
4054:                                        shift--;
4055:                                    }
4056:                                    break;
4057:                                }
4058:
4059:                                // the length after the log record
4060:                                logOut.writeInt(length);
4061:                                break;
4062:
4063:                            }
4064:
4065:                            // do make sure the partial write gets on disk by sync'ing it
4066:                            flush(logFileNumber, endPosition);
4067:
4068:                        }
4069:
4070:                    } catch (IOException ioe) {
4071:                        throw StandardException.newException(SQLState.LOG_FULL,
4072:                                ioe);
4073:                    }
4074:
4075:                    return instant;
4076:                }
4077:                return 0;
4078:            }
4079:
4080:            /**
4081:            	Simulate a log full condition
4082:
4083:            	if TEST_LOG_FULL is set to true, then the property
4084:            	TEST_RECORD_TO_FILL_LOG indicates the number of times this function is
4085:            	called before an IOException simulating a log full condition is raised.
4086:
4087:            	If TEST_RECORD_TO_FILL_LOG is not set, it defaults to 100 log records.
4088:             */
4089:            protected void testLogFull() throws IOException {
4090:                if (SanityManager.DEBUG) {
4091:                    if (test_numRecordToFillLog < 0) {
4092:                        String RecordToFillLog = PropertyUtil
4093:                                .getSystemProperty(TEST_RECORD_TO_FILL_LOG);
4094:                        if (RecordToFillLog != null)
4095:                            test_numRecordToFillLog = Integer.valueOf(
4096:                                    RecordToFillLog).intValue();
4097:                        else
4098:                            test_numRecordToFillLog = 100;
4099:                    }
4100:
4101:                    if (++test_logWritten > test_numRecordToFillLog)
4102:                        throw new IOException("TestLogFull "
4103:                                + test_numRecordToFillLog + " written "
4104:                                + test_logWritten);
4105:
4106:                }
4107:            }
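            // A test might arm this simulation before booting the database by turning
            // on the TEST_LOG_FULL debug flag and setting the record count; a rough
            // sketch, assuming debug flags are enabled through the derby.debug.true
            // property in a SANE (debug) build:
            //
            //     System.setProperty("derby.debug.true", "TEST_LOG_FULL");
            //     System.setProperty("derbyTesting.unittest.recordToFillLog", "5");
            //     // after roughly five log records, testLogFull() throws the
            //     // IOException that simulates a full log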
4108:
4109:            /**
4110:             * Get the log file to simulate log corruption 
4111:             * FOR UNIT TESTING USAGE ONLY 
4112:             */
4113:            public StorageRandomAccessFile getLogFileToSimulateCorruption(
4114:                    long filenum) throws IOException, StandardException {
4115:                if (SanityManager.DEBUG) {
4116:                    //long filenum = LogCounter.getLogFileNumber(logInstant);
4117:                    //			long filepos = LogCounter.getLogFilePosition(logInstant);
4118:                    StorageFile fileName = getLogFileName(filenum);
4119:                    StorageRandomAccessFile log = null;
4120:                    return privRandomAccessFile(fileName, "rw");
4121:                }
4122:
4123:                return null;
4124:
4125:            }
4126:
4127:            /*********************************************************************
4128:             * Log Testing
4129:             * 
4130:             * Implementations may use these strings to simulate error conditions for
4131:             * testing purposes.
4132:             *
4133:             *********************************************************************/
4134:
4135:            /**
4136:              Set to true if we want the checkpoint to only switch the log but not
4137:              actually do the checkpoint
4138:             */
4139:            public static final String TEST_LOG_SWITCH_LOG = SanityManager.DEBUG ? "TEST_LOG_SWITCH_LOG"
4140:                    : null;
4141:
4142:            /**
4143:              Set to true if we want the upcoming log record to be only partially
4144:              written.  The database is corrupted if not immediately shut down.
4145:              Set TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES to the number of bytes to write
4146:              out, default is 1 byte.
4147:             */
4148:            public static final String TEST_LOG_INCOMPLETE_LOG_WRITE = SanityManager.DEBUG ? "TEST_LOG_INCOMPLETE_LOG_WRITE"
4149:                    : null;
4150:
4151:            /**
4152:              Set to the number of bytes we want the next log record to actually write
4153:              out, only used when TEST_LOG_INCOMPLETE_LOG_WRITE is on.  Default is 1
4154:              byte.
4155:             */
4156:            public static final String TEST_LOG_PARTIAL_LOG_WRITE_NUM_BYTES = SanityManager.DEBUG ? "derbyTesting.unittest.partialLogWrite"
4157:                    : null;
4158:
4159:            /**
4160:              Set to true if we want to simulate a log full condition
4161:             */
4162:            public static final String TEST_LOG_FULL = SanityManager.DEBUG ? "TEST_LOG_FULL"
4163:                    : null;
4164:
4165:            /**
4166:              Set to true if we want to simulate a log full condition while switching the log
4167:             */
4168:            public static final String TEST_SWITCH_LOG_FAIL1 = SanityManager.DEBUG ? "TEST_SWITCH_LOG_FAIL1"
4169:                    : null;
4170:            public static final String TEST_SWITCH_LOG_FAIL2 = SanityManager.DEBUG ? "TEST_SWITCH_LOG_FAIL2"
4171:                    : null;
4172:
4173:            /**
4174:              Set to the number of log records we want to write before the log is
4175:              simulated to be full.
4176:             */
4177:            public static final String TEST_RECORD_TO_FILL_LOG = SanityManager.DEBUG ? "derbyTesting.unittest.recordToFillLog"
4178:                    : null;
4179:
4180:            /**
4181:             * Set to true if we want to simulate that the maximum possible log file
4182:             * number is being used.
4183:             */
4184:            public static final String TEST_MAX_LOGFILE_NUMBER = SanityManager.DEBUG ? "testMaxLogFileNumber"
4185:                    : null;
4186:
4187:            //enable the log archive mode
4188:            public void enableLogArchiveMode() throws StandardException {
4189:
4190:                // if the log archive mode is already enabled, there is nothing to do
4191:                if (!logArchived) {
4192:                    logArchived = true;
4193:                    AccessFactory af = (AccessFactory) Monitor
4194:                            .getServiceModule(this , AccessFactory.MODULE);
4195:
4196:                    if (af != null) {
4197:                        TransactionController tc = null;
4198:                        tc = af.getTransaction(ContextService.getFactory()
4199:                                .getCurrentContextManager());
4200:                        tc.setProperty(Property.LOG_ARCHIVE_MODE, "true", true);
4201:                    }
4202:                }
4203:            }
4204:
4205:            // disable the log archive mode
4206:            public void disableLogArchiveMode() throws StandardException {
4207:                AccessFactory af = (AccessFactory) Monitor.getServiceModule(
4208:                        this , AccessFactory.MODULE);
4209:                if (af != null) {
4210:                    TransactionController tc = null;
4211:                    tc = af.getTransaction(ContextService.getFactory()
4212:                            .getCurrentContextManager());
4213:                    tc.setProperty(Property.LOG_ARCHIVE_MODE, "false", true);
4214:                }
4215:                logArchived = false;
4216:            }
4217:
4218:            //delete the online archived log files
4219:            public void deleteOnlineArchivedLogFiles() {
4220:                deleteObsoleteLogfiles();
4221:            }
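            // Log archive mode is normally toggled from SQL rather than by calling
            // these methods directly.  An illustrative JDBC sketch, assuming the
            // standard SYSCS_UTIL system procedures and a hypothetical backup location:
            //
            //     CallableStatement cs = conn.prepareCall(
            //         "CALL SYSCS_UTIL.SYSCS_BACKUP_DATABASE_AND_ENABLE_LOG_ARCHIVE_MODE(?, ?)");
            //     cs.setString(1, "/backup/mydb");   // where the backup is written
            //     cs.setInt(2, 1);                   // delete old archived logs
            //     cs.execute();
            //
            //     CallableStatement cs2 = conn.prepareCall(
            //         "CALL SYSCS_UTIL.SYSCS_DISABLE_LOG_ARCHIVE_MODE(?)");
            //     cs2.setInt(1, 1);                  // also delete the archived logs
            //     cs2.execute();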
4222:
4223:            /*
4224:             * Start the transaction log backup.  
4225:             *
4226:             * The transaction log is required to bring the database to the consistent 
4227:             * state on restore. 
4228:             *
4229:             * All the log files that are created after the backup starts 
4230:             * must be kept around until they are copied into the backup,
4231:             * even if there are checkpoints when backup is in progress. 
4232:             *
4233:             * Copy the log control files to the backup (the checkpoint recorded in the
4234:             * control files is the backup checkpoint). Restore will use the checkpoint 
4235:             * info in these control files to perform recovery to bring 
4236:             * the database to the consistent state.  
4237:             *
4238:             * Find the first log file that needs to be copied into the backup to bring 
4239:             * the database to the consistent state on restore. 
4240:             * 
4241:             * In the end, existing log files that are needed to recover from the backup
4242:             * checkpoint are copied into the backup, any log that gets generated after
4243:             * this call are also copied into the backup after all the information 
4244:             * in the data containers is written to the backup, when endLogBackup() 
4245:             * is called.
4246:             *
4247:             * @param toDir - location where the log files should be copied to.
4248:             * @exception StandardException Standard Derby error policy
4249:             *
4250:             */
4251:            public void startLogBackup(File toDir) throws StandardException {
4252:
4253:                // synchronization is necessary to make sure NO parallel 
4254:                // checkpoint happens when the current checkpoint information 
4255:                // is being copied to the backup.
4256:
4257:                synchronized (this ) {
4258:                    // wait until the thread that is doing the checkpoint completes it.
4259:                    while (inCheckpoint) {
4260:                        try {
4261:                            wait();
4262:                        } catch (InterruptedException ie) {
4263:                            throw StandardException.interrupt(ie);
4264:                        }
4265:                    }
4266:
4267:                    backupInProgress = true;
4268:
4269:                    // copy the control files. 
4270:                    StorageFile fromFile;
4271:                    File toFile;
4272:                    // copy the log control file
4273:                    fromFile = getControlFileName();
4274:                    toFile = new File(toDir, fromFile.getName());
4275:                    if (!privCopyFile(fromFile, toFile)) {
4276:                        throw StandardException.newException(
4277:                                SQLState.RAWSTORE_ERROR_COPYING_FILE, fromFile,
4278:                                toFile);
4279:                    }
4280:
4281:                    // copy the log mirror control file
4282:                    fromFile = getMirrorControlFileName();
4283:                    toFile = new File(toDir, fromFile.getName());
4284:                    if (!privCopyFile(fromFile, toFile)) {
4285:                        throw StandardException.newException(
4286:                                SQLState.RAWSTORE_ERROR_COPYING_FILE, fromFile,
4287:                                toFile);
4288:                    }
4289:
4290:                    // find the first log file number that is active
4291:                    logFileToBackup = getFirstLogNeeded(currentCheckpoint);
4292:                }
4293:
4294:                // copy all the log files that have to go into the backup 
4295:                backupLogFiles(toDir, getLogFileNumber() - 1);
4296:            }
4297:
4298:            /*
4299:             * copy the log files into the given backup location
4300:             *
4301:             * @param toDir               - location to copy the log files to
4302:             * @param lastLogFileToBackup - last log file that needs to be copied.
4303:             **/
4304:            private void backupLogFiles(File toDir, long lastLogFileToBackup)
4305:                    throws StandardException {
4306:
4307:                while (logFileToBackup <= lastLogFileToBackup) {
4308:                    StorageFile fromFile = getLogFileName(logFileToBackup);
4309:                    File toFile = new File(toDir, fromFile.getName());
4310:                    if (!privCopyFile(fromFile, toFile)) {
4311:                        throw StandardException.newException(
4312:                                SQLState.RAWSTORE_ERROR_COPYING_FILE, fromFile,
4313:                                toFile);
4314:                    }
4315:                    logFileToBackup++;
4316:                }
4317:            }
4318:
4319:            /*
4320:             * copy all the log files that have to go into the backup
4321:             * and mark that the backup is completed. 
4322:             *
4323:             * @param toDir - location where the log files should be copied to.
4324:             * @exception StandardException Standard Derby error policy
4325:             */
4326:            public void endLogBackup(File toDir) throws StandardException {
4327:                long lastLogFileToBackup;
4328:
4329:                // Make sure all log records are synced to disk.  The online backup
4330:                // copied data "through" the cache, so it may have picked up dirty pages
4331:                // whose associated log records have not yet been synced to disk.
4332:                // Without this force, the backup may end up with page versions 
4333:                // in the backup without their associated log records.
4334:                flush(logFileNumber, endPosition);
4335:
4336:                if (logArchived) {
4337:                    // when the log is being archived for roll-forward recovery
4338:                    // we would like to switch to a new log file.
4339:                    // otherwise, during restore, a log file in the backup could
4340:                    // overwrite the more up-to-date log files in the
4341:                    // online log path. We would also like to write the end
4342:                    // marker for the log file; otherwise, during roll-forward recovery,
4343:                    // if we see a log file with a fuzzy end, we think that is the
4344:                    // end of the recovery.
4345:                    switchLogFile();
4346:                    lastLogFileToBackup = getLogFileNumber() - 1;
4347:                } else {
4348:                    // for a plain online backup a partially filled log file is OK, 
4349:                    // no need to do a log switch.
4350:                    lastLogFileToBackup = getLogFileNumber();
4351:                }
4352:
4353:                // backup all the log that got generated after the backup started.
4354:                backupLogFiles(toDir, lastLogFileToBackup);
4355:
4356:                // mark that backup is completed.
4357:                backupInProgress = false;
4358:            }
4359:
4360:            /*
4361:             * backup is not in progress any more, it failed for some reason.
4362:             **/
4363:            public void abortLogBackup() {
4364:                backupInProgress = false;
4365:            }
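            // startLogBackup(), endLogBackup() and abortLogBackup() bracket the copying
            // of the data containers.  A rough sketch of the order a caller follows
            // (hypothetical caller code, simplified):
            //
            //     logFactory.startLogBackup(backupDir);    // control files + logs so far
            //     copyDataContainersToBackup(backupDir);   // hypothetical: data files go
            //                                              // to the backup through the cache
            //     logFactory.endLogBackup(backupDir);      // flush, then copy remaining logs
            //     // on any failure along the way:
            //     //     logFactory.abortLogBackup();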
4366:
4367:            // Is the transaction in rollforward recovery
4368:            public boolean inRFR() {
4369:                /*
4370:                 * The logging system does not differentiate between
4371:                 * crash recovery and a rollforward recovery,
4372:                 * except in the case of a rollforward attempt on
4373:                 * read-only databases to check for pending transactions.
4374:                 * (See the comments in the recovery() function.)
4375:                 */
4376:
4377:                if (recoveryNeeded) {
4378:                    boolean readOnly = false;
4379:                    try {
4380:                        readOnly = !privCanWrite(getControlFileName());
4381:                    } catch (StandardException se) {
4382:                        //The exception should never get here, because
4383:                        //getControlFileName() is called earlier at boot time;
4384:                        //if there were problems they would have shown up
4385:                        //earlier.
4386:                        //We just ignore this error and assume that the
4387:                        //dataFactory has marked the database read-only if that is the case.
4388:                    }
4389:
4390:                    readOnly = readOnly
4391:                            || (dataFactory == null ? false : dataFactory
4392:                                    .isReadOnly());
4393:                    return !readOnly;
4394:                } else {
4395:                    return false;
4396:                }
4397:            }
4398:
4399:            /**	
4400:             *	redo a checkpoint during rollforward recovery
4401:             */
4402:            public void checkpointInRFR(LogInstant cinstant, long redoLWM,
4403:                    DataFactory df) throws StandardException {
4404:                //sync the data
4405:                df.checkpoint();
4406:
4407:                //write the log control file; this makes sure that a restart of the
4408:                //rollforward recovery will start from this log instant next time
4409:                //instead of from the beginning.
4410:                try {
4411:                    if (!writeControlFile(getControlFileName(),
4412:                            ((LogCounter) cinstant).getValueAsLong())) {
4413:                        throw StandardException
4414:                                .newException(SQLState.LOG_CONTROL_FILE,
4415:                                        getControlFileName());
4416:                    }
4417:                } catch (IOException ioe) {
4418:                    throw markCorrupt(StandardException.newException(
4419:                            SQLState.LOG_IO_ERROR, ioe));
4420:                }
4421:                //remove the stub files
4422:                df.removeDroppedContainerFileStubs(new LogCounter(redoLWM));
4423:
4424:            }
4425:
4426:            /**
4427:             *
4428:             * This function restores logs based on the following attributes
4429:             * specified on the connection URL:
4430:             * Attribute.CREATE_FROM (create the database from a backup if it does not exist)
4431:             * Attribute.RESTORE_FROM (delete the whole database if it exists and then
4432:             * restore it from the backup)
4433:             * Attribute.ROLL_FORWARD_RECOVERY_FROM (perform rollforward recovery;
4434:             * except for the log directory everything else is replaced by the copy from
4435:             * the backup; log files in the backup are copied to the existing online log
4436:             * directory)
4437:             *
4438:             * In the RESTORE_FROM case the whole database directory
4439:             * is removed in Directory.java while restoring service.properties,
4440:             * so even the log directory is removed.
4441:             * In the CREATE_FROM case the log directory will not exist if
4442:             * we came this far, because creation fails if a database already exists.
4443:             * In the ROLL_FORWARD_RECOVERY_FROM case the log directory should not be removed.
4444:             * So the only thing that needs to be done here is to create the log
4445:             * directory if it does not exist and copy the log files (including control
4446:             * files) that exist in the backup into the online log directory.
4447:             * (Illustrative connection URLs for these modes follow this method.)
4448:             */
4449:            private boolean restoreLogs(Properties properties)
4450:                    throws StandardException {
4451:
4452:                String backupPath = null;
4453:                boolean isCreateFrom = false;
4454:                boolean isRestoreFrom = false;
4455:
4456:                //check if the user requested restore/recovery/create from a backup
4457:                backupPath = properties.getProperty(Attribute.CREATE_FROM);
4458:                if (backupPath != null) {
4459:                    isCreateFrom = true;
4460:                } else {
4461:                    backupPath = properties.getProperty(Attribute.RESTORE_FROM);
4462:                    if (backupPath != null) {
4463:                        isRestoreFrom = true;
4464:                    } else {
4465:                        backupPath = properties
4466:                                .getProperty(Attribute.ROLL_FORWARD_RECOVERY_FROM);
4467:                        // if backupPath is not null then it is a rollforward recovery.
4468:                    }
4469:                }
4470:
4471:                if (backupPath != null) {
4472:                    if (!isCreateFrom) {
4473:                        if (logDevice == null) {
4474:                            /**
4475:                             * In restoreFrom/rollForwardRecoveryFrom mode, when no
4476:                             * logDevice is specified on the URL, the log is restored to
4477:                             * the location the log was in when the backup was taken.
4478:                             * In createFrom mode the behaviour is the same as when
4479:                             * create=true, i.e. unless the user specifies the logDevice
4480:                             * on the URL, the log is copied to the database home dir.
4481:                             * Note: LOG_DEVICE_AT_BACKUP gets set if the log is not in
4482:                             * the default location (db home).
4483:                             */
4484:                            logDevice = properties
4485:                                    .getProperty(Property.LOG_DEVICE_AT_BACKUP);
4486:                        }
4487:                    }
4488:
4489:                    getLogStorageFactory();
4490:                    StorageFile logDir;
4491:                    logDir = logStorageFactory
4492:                            .newStorageFile(LogFactory.LOG_DIRECTORY_NAME);
4493:
4494:                    //remove the log directory in the restoreFrom case
4495:                    //if it exists; this happens if the log device is in a
4496:                    //separate location from the db home.
4497:                    if (isRestoreFrom && logDevice != null) {
4498:                        if (!privRemoveDirectory(logDir)) {
4499:                            //it may be just a file, try deleting it
4500:                            if (!privDelete(logDir)) {
4501:                                throw StandardException
4502:                                        .newException(
4503:                                                SQLState.UNABLE_TO_REMOVE_DATA_DIRECTORY,
4504:                                                getLogDirPath(logDir));
4505:                            }
4506:                        }
4507:                    }
4508:
4509:                    // if it is a create/restore from backup, 
4510:                    // create the log directory.
4511:                    if (isCreateFrom || isRestoreFrom) {
4512:                        createLogDirectory();
4513:                    }
4514:
4515:                    File backupLogDir = new File(backupPath,
4516:                            LogFactory.LOG_DIRECTORY_NAME);
4517:                    String[] logfilelist = privList(backupLogDir);
4518:                    if (logfilelist != null) {
4519:                        for (int i = 0; i < logfilelist.length; i++) {
4520:                            File blogFile = new File(backupLogDir,
4521:                                    logfilelist[i]);
4522:                            StorageFile clogFile = logStorageFactory
4523:                                    .newStorageFile(logDir, logfilelist[i]);
4524:                            if (!privCopyFile(blogFile, clogFile)) {
4525:                                throw StandardException.newException(
4526:                                        SQLState.UNABLE_TO_COPY_LOG_FILE,
4527:                                        blogFile, clogFile);
4528:                            }
4529:                        }
4530:                    } else {
4531:                        throw StandardException.newException(
4532:                                SQLState.LOG_DIRECTORY_NOT_FOUND_IN_BACKUP,
4533:                                backupLogDir);
4534:                    }
4535:                    //we need to switch the log file after redo while
4536:                    //doing recovery from backups; otherwise, on the next
4537:                    //restore, a log updated after this restore could be
4538:                    //replaced with a log from the backup.
4539:                    logSwitchRequired = true;
4540:
4541:                    // log is restored from backup.
4542:                    return true;
4543:                } else {
4544:                    // log is not restored from backup.
4545:                    return false;
4546:                }
4547:            }
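            /*
             * Illustrative connection URLs that exercise the three restore modes
             * handled by restoreLogs() above. The database name and backup path
             * below are hypothetical; only the attribute syntax is significant:
             *
             *   jdbc:derby:wombat;createFrom=/backups/wombat
             *   jdbc:derby:wombat;restoreFrom=/backups/wombat
             *   jdbc:derby:wombat;rollForwardRecoveryFrom=/backups/wombat
             *
             * In each case the attribute value names the backup copy of the
             * database directory, and restoreLogs() copies its log subdirectory
             * (LogFactory.LOG_DIRECTORY_NAME) into the online log location.
             */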
4548:
4549:            /*preallocate the given log file to the logSwitchInterval size;
4550:             *the file is extended by writing zeros after the header up to
4551:             *the log file size set by the user.
4552:             */
4553:            private void preAllocateNewLogFile(StorageRandomAccessFile log)
4554:                    throws IOException, StandardException {
4555:                //preallocate the file by writing zeros into it.
4556:
4557:                if (SanityManager.DEBUG) {
4558:                    int currentPosition = (int) log.getFilePointer();
4559:                    SanityManager.ASSERT(
4560:                            currentPosition == LOG_FILE_HEADER_SIZE,
4561:                            "New Log File Is not Correctly Initialized");
4562:                }
4563:
4564:                int amountToWrite = logSwitchInterval - LOG_FILE_HEADER_SIZE;
4565:                int bufferSize = logBufferSize * 2;
4566:                byte[] emptyBuffer = new byte[bufferSize];
4567:                int nWrites = amountToWrite / bufferSize;
4568:                int remainingBytes = amountToWrite % bufferSize;
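                // Illustrative sizing only (these values are typical defaults, not
                // guaranteed): with logSwitchInterval = 1048576 and
                // LOG_FILE_HEADER_SIZE = 24, amountToWrite = 1048552; with
                // logBufferSize = 32768, bufferSize = 65536, so the loop below does
                // nWrites = 15 full writes plus a final write of
                // remainingBytes = 65512 bytes.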
4569:
4570:                try {
4571:                    while (nWrites-- > 0)
4572:                        log.write(emptyBuffer);
4573:
4574:                    if (remainingBytes != 0)
4575:                        log.write(emptyBuffer, 0, remainingBytes);
4576:
4577:                    //sync the file
4578:                    syncFile(log);
4579:                } catch (IOException ie) {
4580:                    //ignore IO exceptions during preallocation,
4581:                    //because this is only a performance improvement;
4582:                    //the system should work fine even without preallocation.
4583:
4584:                    //RESOLVE: If the exception is because of no
4585:                    //space, it might be a good idea to trigger a checkpoint.
4586:
4587:                    //In debug mode throw the exception 
4588:                    if (SanityManager.DEBUG) {
4589:                        throw ie;
4590:                    }
4591:                }
4592:            } // end of preAllocateNewLogFile
4593:
4594:            /**
4595:             * open the given log file for writes; if the file cannot be
4596:             * opened in write sync mode then disable write sync mode and
4597:             * open the file in "rw" mode.
4598:             */
4599:            private StorageRandomAccessFile openLogFileInWriteMode(
4600:                    StorageFile logFile) throws IOException {
4601:                StorageRandomAccessFile log;
4602:                try {
4603:                    log = privRandomAccessFile(logFile, "rws");
4604:                } catch (FileNotFoundException ex) {
4605:                    // Normally this exception should never occur. For some reason,
4606:                    // currently on the Mac JVM 1.4.2, a FileNotFoundException is
4607:                    // thrown if a file is opened in "rws" mode and it already
4608:                    // exists. Please refer to Derby-1 for more details on this issue.
4609:                    // The temporary workaround for this problem is to make the logging
4610:                    // system use the file sync mechanism.
4611:
4612:                    // disable the write sync and open the file in "rw" mode. 
4613:                    isWriteSynced = false;
4614:                    log = privRandomAccessFile(logFile, "rw");
4615:                }
4616:
4617:                return log;
4618:            }
4619:
4620:            private String getLogDirPath(StorageFile logDir) {
4621:                if (logDevice == null)
4622:                    return logDir.toString();
4623:                return logDevice + logStorageFactory.getSeparator()
4624:                        + logDir.toString();
4625:            } // end of getLogDirPath
4626:
4627:            /*
4628:                Following  methods require Priv Blocks to run under a security manager.
4629:             */
4630:            private int action;
4631:            private StorageFile activeFile;
4632:            private File toFile;
4633:            private String activePerms;
4634:
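            /*
             * Dispatch pattern: each priv* wrapper below stores an operation code
             * in 'action' (and its arguments in 'activeFile', 'toFile' and/or
             * 'activePerms'), then calls AccessController.doPrivileged(this), which
             * invokes the run() method at the end of this class. run() switches on
             * the code: 0 = exists, 1 = delete, 2 = open random access file,
             * 3 = canWrite, 4 = mkdirs, 5 = list a StorageFile, 6 = copy a
             * StorageFile to a File, 7 = remove a directory, 8 = list a File,
             * 9 = copy a File to a StorageFile.
             */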
4635:            protected boolean privExists(StorageFile file) {
4636:                return runBooleanAction(0, file);
4637:            }
4638:
4639:            protected boolean privDelete(StorageFile file) {
4640:                return runBooleanAction(1, file);
4641:            }
4642:
4643:            private synchronized StorageRandomAccessFile privRandomAccessFile(
4644:                    StorageFile file, String perms) throws IOException {
4645:                action = 2;
4646:                activeFile = file;
4647:                activePerms = perms;
4648:                try {
4649:                    return (StorageRandomAccessFile) java.security.AccessController
4650:                            .doPrivileged(this);
4651:                } catch (java.security.PrivilegedActionException pae) {
4652:                    throw (IOException) pae.getException();
4653:                }
4654:            }
4655:
4656:            protected boolean privCanWrite(StorageFile file) {
4657:                return runBooleanAction(3, file);
4658:            }
4659:
4660:            protected boolean privMkdirs(StorageFile file) {
4661:                return runBooleanAction(4, file);
4662:            }
4663:
4664:            private synchronized String[] privList(File file) {
4665:                action = 8;
4666:                toFile = file;
4667:
4668:                try {
4669:                    return (String[]) java.security.AccessController
4670:                            .doPrivileged(this);
4671:                } catch (java.security.PrivilegedActionException pae) {
4672:                    return null;
4673:                }
4674:            }
4675:
4676:            private synchronized String[] privList(StorageFile file) {
4677:                action = 5;
4678:                activeFile = file;
4679:
4680:                try {
4681:                    return (String[]) java.security.AccessController
4682:                            .doPrivileged(this);
4683:                } catch (java.security.PrivilegedActionException pae) {
4684:                    return null;
4685:                }
4686:            }
4687:
4688:            private synchronized boolean privCopyFile(StorageFile from, File to) {
4689:                action = 6;
4690:                activeFile = from;
4691:                toFile = to;
4692:                try {
4693:                    return ((Boolean) java.security.AccessController
4694:                            .doPrivileged(this)).booleanValue();
4695:                } catch (java.security.PrivilegedActionException pae) {
4696:                    return false;
4697:                }
4698:            }
4699:
4700:            private synchronized boolean privCopyFile(File from, StorageFile to) {
4701:                action = 9;
4702:                activeFile = to;
4703:                toFile = from;
4704:                try {
4705:                    return ((Boolean) java.security.AccessController
4706:                            .doPrivileged(this)).booleanValue();
4707:                } catch (java.security.PrivilegedActionException pae) {
4708:                    return false;
4709:                }
4710:            }
4711:
4712:            private boolean privRemoveDirectory(StorageFile file) {
4713:                return runBooleanAction(7, file);
4714:            }
4715:
4716:            private synchronized boolean runBooleanAction(int action,
4717:                    StorageFile file) {
4718:                this.action = action;
4719:                this.activeFile = file;
4720:
4721:                try {
4722:                    return ((Boolean) java.security.AccessController
4723:                            .doPrivileged(this)).booleanValue();
4724:                } catch (java.security.PrivilegedActionException pae) {
4725:                    return false;
4726:                }
4727:            }
4728:
4729:            public final Object run() throws IOException {
4730:                switch (action) {
4731:                case 0:
4732:                    // SECURITY PERMISSION - MP1
4733:                    return ReuseFactory.getBoolean(activeFile.exists());
4734:                case 1:
4735:                    // SECURITY PERMISSION - OP5
4736:                    return ReuseFactory.getBoolean(activeFile.delete());
4737:                case 2:
4738:                    // SECURITY PERMISSION - MP1 and/or OP4
4739:                    // depending on the value of activePerms
4740:                    return activeFile.getRandomAccessFile(activePerms);
4741:                case 3:
4742:                    // SECURITY PERMISSION - OP4
4743:                    return ReuseFactory.getBoolean(activeFile.canWrite());
4744:                case 4:
4745:                    // SECURITY PERMISSION - OP4
4746:                    return ReuseFactory.getBoolean(activeFile.mkdirs());
4747:                case 5:
4748:                    // SECURITY PERMISSION - MP1
4749:                    return activeFile.list();
4750:                case 6:
4751:                    // SECURITY PERMISSION - OP4 (Have to check these codes ??)
4752:                    return ReuseFactory.getBoolean(FileUtil.copyFile(
4753:                            logStorageFactory, activeFile, toFile));
4754:                case 7:
4755:                    // SECURITY PERMISSION - OP4
4756:                    if (!activeFile.exists())
4757:                        return ReuseFactory.getBoolean(true);
4758:                    return ReuseFactory.getBoolean(activeFile.deleteAll());
4759:                case 8:
4760:                    return toFile.list();
4761:                case 9:
4762:                    return ReuseFactory.getBoolean(FileUtil.copyFile(
4763:                            logStorageFactory, toFile, activeFile));
4764:
4765:                default:
4766:                    return null;
4767:                }
4768:            }
4769:        }
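
The priv* helpers above all funnel through a single PrivilegedExceptionAction: a wrapper records an operation code and its arguments in instance fields, calls AccessController.doPrivileged(this), and run() dispatches on the code. The following self-contained sketch illustrates that pattern in isolation; the class name PrivFileOps, its two action codes and the file name in main() are hypothetical and not part of Derby, but the dispatch structure mirrors LogToFile's priv* methods and run().

    import java.io.File;
    import java.io.IOException;
    import java.security.AccessController;
    import java.security.PrivilegedActionException;
    import java.security.PrivilegedExceptionAction;

    // Hypothetical stand-alone illustration of the priv-block dispatch pattern.
    public class PrivFileOps implements PrivilegedExceptionAction {

        private int action;      // operation code consumed by run()
        private File activeFile; // argument for the operation

        // wrapper: existence check performed inside a privileged block
        public synchronized boolean privExists(File file) {
            action = 0;
            activeFile = file;
            try {
                return ((Boolean) AccessController.doPrivileged(this)).booleanValue();
            } catch (PrivilegedActionException pae) {
                return false;
            }
        }

        // wrapper: deletion performed inside a privileged block
        public synchronized boolean privDelete(File file) {
            action = 1;
            activeFile = file;
            try {
                return ((Boolean) AccessController.doPrivileged(this)).booleanValue();
            } catch (PrivilegedActionException pae) {
                return false;
            }
        }

        // run() is the single privileged entry point; it switches on the
        // action code set by the wrapper, just as LogToFile.run() does.
        public Object run() throws IOException {
            switch (action) {
            case 0:
                return Boolean.valueOf(activeFile.exists());
            case 1:
                return Boolean.valueOf(activeFile.delete());
            default:
                return null;
            }
        }

        public static void main(String[] args) {
            PrivFileOps ops = new PrivFileOps();
            File f = new File("example.tmp"); // hypothetical file name
            System.out.println("exists: " + ops.privExists(f));
            System.out.println("deleted: " + ops.privDelete(f));
        }
    }

The point of routing every file operation through doPrivileged is that the permission check stops at the protection domain of this class (e.g. derby.jar), so application code higher up the call stack does not itself need the corresponding FilePermission.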