Source Code Cross Referenced for LogManager.java in » JMX » je » com » sleepycat » je » log » Java Source Code / Java Documentation

Java Source Code / Java Documentation
1. 6.0 JDK Core
2. 6.0 JDK Modules
3. 6.0 JDK Modules com.sun
4. 6.0 JDK Modules com.sun.java
5. 6.0 JDK Modules sun
6. 6.0 JDK Platform
7. Ajax
8. Apache Harmony Java SE
9. Aspect oriented
10. Authentication Authorization
11. Blogger System
12. Build
13. Byte Code
14. Cache
15. Chart
16. Chat
17. Code Analyzer
18. Collaboration
19. Content Management System
20. Database Client
21. Database DBMS
22. Database JDBC Connection Pool
23. Database ORM
24. Development
25. EJB Server geronimo
26. EJB Server GlassFish
27. EJB Server JBoss 4.2.1
28. EJB Server resin 3.1.5
29. ERP CRM Financial
30. ESB
31. Forum
32. GIS
33. Graphic Library
34. Groupware
35. HTML Parser
36. IDE
37. IDE Eclipse
38. IDE Netbeans
39. Installer
40. Internationalization Localization
41. Inversion of Control
42. Issue Tracking
43. J2EE
44. JBoss
45. JMS
46. JMX
47. Library
48. Mail Clients
49. Net
50. Parser
51. PDF
52. Portal
53. Profiler
54. Project Management
55. Report
56. RSS RDF
57. Rule Engine
58. Science
59. Scripting
60. Search Engine
61. Security
62. Servlet Container
63. Source Control
64. Swing Library
65. Template Engine
66. Test Coverage
67. Testing
68. UML
69. Web Crawler
70. Web Framework
71. Web Mail
72. Web Server
73. Web Services
74. Web Services apache cxf 2.0.1
75. Web Services AXIS2
76. Wiki Engine
77. Workflow Engines
78. XML
79. XML UI
Java
Java Tutorial
Java Open Source
Jar File Download
Java Articles
Java Products
Java by API
Photoshop Tutorials
Maya Tutorials
Flash Tutorials
3ds-Max Tutorials
Illustrator Tutorials
GIMP Tutorials
C# / C Sharp
C# / CSharp Tutorial
C# / CSharp Open Source
ASP.Net
ASP.NET Tutorial
JavaScript DHTML
JavaScript Tutorial
JavaScript Reference
HTML / CSS
HTML CSS Reference
C / ANSI-C
C Tutorial
C++
C++ Tutorial
Ruby
PHP
Python
Python Tutorial
Python Open Source
SQL Server / T-SQL
SQL Server / T-SQL Tutorial
Oracle PL / SQL
Oracle PL/SQL Tutorial
PostgreSQL
SQL / MySQL
MySQL Tutorial
VB.Net
VB.Net Tutorial
Flash / Flex / ActionScript
VBA / Excel / Access / Word
XML
XML Tutorial
Microsoft Office PowerPoint 2007 Tutorial
Microsoft Office Excel 2007 Tutorial
Microsoft Office Word 2007 Tutorial
Java Source Code / Java Documentation » JMX » je » com.sleepycat.je.log 
Source Cross Referenced  Class Diagram Java Document (Java Doc) 


001:        /*-
002:         * See the file LICENSE for redistribution information.
003:         *
004:         * Copyright (c) 2002,2008 Oracle.  All rights reserved.
005:         *
006:         * $Id: LogManager.java,v 1.163.2.8 2008/01/07 15:14:13 cwl Exp $
007:         */
008:
009:        package com.sleepycat.je.log;
010:
011:        import java.io.IOException;
012:        import java.io.RandomAccessFile;
013:        import java.nio.BufferOverflowException;
014:        import java.nio.ByteBuffer;
015:        import java.nio.channels.ClosedChannelException;
016:        import java.util.List;
017:
018:        import com.sleepycat.je.DatabaseException;
019:        import com.sleepycat.je.EnvironmentStats;
020:        import com.sleepycat.je.RunRecoveryException;
021:        import com.sleepycat.je.StatsConfig;
022:        import com.sleepycat.je.cleaner.TrackedFileSummary;
023:        import com.sleepycat.je.cleaner.UtilizationTracker;
024:        import com.sleepycat.je.config.EnvironmentParams;
025:        import com.sleepycat.je.dbi.DbConfigManager;
026:        import com.sleepycat.je.dbi.EnvironmentImpl;
027:        import com.sleepycat.je.dbi.Operation;
028:        import com.sleepycat.je.latch.Latch;
029:        import com.sleepycat.je.latch.LatchSupport;
030:        import com.sleepycat.je.log.entry.LogEntry;
031:        import com.sleepycat.je.utilint.DbLsn;
032:        import com.sleepycat.je.utilint.TestHook;
033:        import com.sleepycat.je.utilint.Tracer;
034:
035:        /**
036:         * The LogManager supports reading and writing to the JE log.
037:         */
038:        abstract public class LogManager {
039:
040:            /* Name under which the log write latch is registered, for debugging. */
041:            private static final String DEBUG_NAME = LogManager.class.getName();
042:
043:            protected LogBufferPool logBufferPool; // log buffers
044:            protected Latch logWriteLatch; // synchronizes log writes
045:            private boolean doChecksumOnRead; // if true, do checksum on read
046:            private FileManager fileManager; // access to files
047:            protected EnvironmentImpl envImpl; // owning environment
048:            private boolean readOnly; // if true, log() is a no-op returning NULL_LSN
049:            private int readBufferSize; // how many bytes to read when faulting in.
050:            /* The last LSN in the log during recovery. */
051:            private long lastLsnAtRecovery = DbLsn.NULL_LSN;
052:
53:            /* Stats */
054:
055:            /*
056:             * Number of times we have to repeat a read when we fault in an object
057:             * because the initial read was too small.
058:             */
059:            private int nRepeatFaultReads;
060:
061:            /*
062:             * Number of times we have to use the temporary marshalling buffer to
063:             * write to the log.
064:             */
065:            private long nTempBufferWrites;
066:
067:            /* For unit tests */
068:            private TestHook readHook; // used for generating exceptions on log reads
069:
070:            /**
071:             * There is a single log manager per database environment.
072:             */
073:            public LogManager(EnvironmentImpl envImpl, boolean readOnly)
074:                    throws DatabaseException {
075:
076:                // Set up log buffers
077:                this .envImpl = envImpl;
078:                this .fileManager = envImpl.getFileManager();
079:                DbConfigManager configManager = envImpl.getConfigManager();
080:                this .readOnly = readOnly;
081:                logBufferPool = new LogBufferPool(fileManager, envImpl);
082:
083:                /* See if we're configured to do a checksum when reading in objects. */
084:                doChecksumOnRead = configManager
085:                        .getBoolean(EnvironmentParams.LOG_CHECKSUM_READ);
086:
087:                logWriteLatch = LatchSupport.makeLatch(DEBUG_NAME, envImpl);
088:                readBufferSize = configManager
089:                        .getInt(EnvironmentParams.LOG_FAULT_READ_SIZE);
090:            }
091:
092:            public boolean getChecksumOnRead() {
093:                return doChecksumOnRead;
094:            }
095:
096:            public long getLastLsnAtRecovery() {
097:                return lastLsnAtRecovery;
098:            }
099:
100:            public void setLastLsnAtRecovery(long lastLsnAtRecovery) {
101:                this .lastLsnAtRecovery = lastLsnAtRecovery;
102:            }
103:
104:            /**
105:             * Reset the pool when the cache is resized.  This method is called after
106:             * the memory budget has been calculated.
107:             */
108:            public void resetPool(DbConfigManager configManager)
109:                    throws DatabaseException {
110:
111:                logBufferPool.reset(configManager);
112:            }
113:
114:            /*
115:             * Writing to the log
116:             */
117:
118:            /**
119:             * Log this single object and force a write of the log files.
120:             * @param item object to be logged
121:             * @param fsyncRequired if true, log files should also be fsynced.
122:             * @return LSN of the new log entry
123:             */
124:            public long logForceFlush(LogEntry item, boolean fsyncRequired)
125:                    throws DatabaseException {
126:
127:                return log(item, false, // is provisional
128:                        true, // flush required
129:                        fsyncRequired, false, // forceNewLogFile
130:                        false, // backgroundIO
131:                        DbLsn.NULL_LSN, // old lsn
132:                        0); // old size
133:            }
134:
135:            /**
136:             * Log this single object and force a flip of the log files.
137:             * @param item object to be logged
138:             * @param fsyncRequired if true, log files should also be fsynced.
139:             * @return LSN of the new log entry
140:             */
141:            public long logForceFlip(LogEntry item) throws DatabaseException {
142:
143:                return log(item, false, // is provisional
144:                        true, // flush required
145:                        false, // fsync required
146:                        true, // forceNewLogFile
147:                        false, // backgroundIO
148:                        DbLsn.NULL_LSN, // old lsn
149:                        0); // old size
150:            }
151:
152:            /**
153:             * Write a log entry.
154:             * @return LSN of the new log entry
155:             */
156:            public long log(LogEntry item) throws DatabaseException {
157:
158:                return log(item, false, // is provisional
159:                        false, // flush required
160:                        false, // fsync required
161:                        false, // forceNewLogFile
162:                        false, // backgroundIO
163:                        DbLsn.NULL_LSN, // old lsn
164:                        0); // old size
165:            }
166:
167:            /**
168:             * Write a log entry.
169:             * @return LSN of the new log entry
170:             */
171:            public long log(LogEntry item, boolean isProvisional,
172:                    boolean backgroundIO, long oldNodeLsn, int oldNodeSize)
173:                    throws DatabaseException {
174:
175:                return log(item, isProvisional, false, // flush required
176:                        false, // fsync required
177:                        false, // forceNewLogFile
178:                        backgroundIO, oldNodeLsn, oldNodeSize);
179:            }
180:
181:            /**
182:             * Write a log entry.
183:             * @param item is the item to be logged.
184:             * @param isProvisional true if this entry should not be read during
185:             * recovery.
186:             * @param flushRequired if true, write the log to the file after
187:             * adding the item. i.e. call java.nio.channel.FileChannel.write().
188:             * @param fsyncRequired if true, fsync the last file after adding the item.
189:             * @param forceNewLogFile if true, flip to a new log file before logging
190:             * the item.
191:             * @param backgroundIO if true, sleep when the backgroundIOLimit is
192:             * exceeded.
193:             * @param oldNodeLsn is the previous version of the node to be counted as
194:             * obsolete, or NULL_LSN if the item is not a node or has no old LSN.
195:             * @param oldNodeSize is the log size of the previous version of the node
196:             * when oldNodeLsn is not NULL_LSN and the old node is an LN.  For old INs,
197:             * zero must be specified.
198:             * @return LSN of the new log entry
199:             */
200:            private long log(LogEntry item, boolean isProvisional,
201:                    boolean flushRequired, boolean fsyncRequired,
202:                    boolean forceNewLogFile, boolean backgroundIO,
203:                    long oldNodeLsn, int oldNodeSize) throws DatabaseException {
204:
205:                if (readOnly) {
206:                    return DbLsn.NULL_LSN;
207:                }
208:
209:                boolean marshallOutsideLatch = item.getLogType()
210:                        .marshallOutsideLatch();
211:                ByteBuffer marshalledBuffer = null;
212:                UtilizationTracker tracker = envImpl.getUtilizationTracker();
213:                LogResult logResult = null;
214:                boolean shouldReplicate = envImpl.isReplicated()
215:                        && item.getLogType().isTypeReplicated();
216:
217:                try {
218:
219:                    /*
220:                     * If possible, marshall this item outside the log write latch to
221:                     * allow greater concurrency by shortening the write critical
222:                     * section.  Note that the header may only be created during
223:                     * marshalling because it calls item.getSize().
224:                     */
225:                    LogEntryHeader header = null;
226:
227:                    if (marshallOutsideLatch) {
228:                        header = new LogEntryHeader(item, isProvisional,
229:                                shouldReplicate);
230:                        marshalledBuffer = marshallIntoBuffer(header, item,
231:                                isProvisional, shouldReplicate);
232:                    }
233:
234:                    logResult = logItem(header, item, isProvisional,
235:                            flushRequired, forceNewLogFile, oldNodeLsn,
236:                            oldNodeSize, marshallOutsideLatch,
237:                            marshalledBuffer, tracker, shouldReplicate);
238:
239:                } catch (BufferOverflowException e) {
240:
241:                    /*
242:                     * A BufferOverflowException may be seen when a thread is
243:                     * interrupted in the middle of the log and the nio direct buffer
244:                     * is mangled is some way by the NIO libraries. JE applications
245:                     * should refrain from using thread interrupt as a thread
246:                     * communications mechanism because nio behavior in the face of
247:                     * interrupts is uncertain. See SR [#10463].
248:                     *
249:                     * One way or another, this type of io exception leaves us in an
250:                     * unworkable state, so throw a run recovery exception.
251:                     */
252:                    throw new RunRecoveryException(envImpl, e);
253:                } catch (IOException e) {
254:
255:                    /*
256:                     * Other IOExceptions, such as out of disk conditions, should
257:                     * notify the application but leave the environment in workable
258:                     * condition.
259:                     */
260:                    throw new DatabaseException(Tracer.getStackTrace(e), e);
261:                }
262:
263:                /*
264:                 * Finish up business outside of the log write latch critical section.
265:                 */
266:
267:                /*
268:                 * If this logged object needs to be fsynced, do so now using the group
269:                 * commit mechanism.
270:                 */
271:                if (fsyncRequired) {
272:                    fileManager.groupSync();
273:                }
274:
275:                /*
276:                 * Periodically, as a function of how much data is written, ask the
277:                 * checkpointer or the cleaner to wake up.
278:                 */
279:                envImpl.getCheckpointer().wakeupAfterWrite();
280:                if (logResult.wakeupCleaner) {
281:                    tracker.activateCleaner();
282:                }
283:
284:                /* Update background writes. */
285:                if (backgroundIO) {
286:                    envImpl.updateBackgroundWrites(logResult.entrySize,
287:                            logBufferPool.getLogBufferSize());
288:                }
289:
290:                return logResult.currentLsn;
291:            }
292:
            /**
             * Writes a single entry into the log.  Concrete subclasses supply
             * the latching policy; presumably each implementation scopes the
             * log write latch around logInternal — implementations are not
             * visible in this file, so confirm against the subclasses.
             *
             * @param header pre-built header when marshallOutsideLatch is
             * true, else null
             * @param marshalledBuffer pre-marshalled entry bytes when
             * marshallOutsideLatch is true, else null
             */
293:            abstract protected LogResult logItem(LogEntryHeader header,
294:                    LogEntry item, boolean isProvisional,
295:                    boolean flushRequired, boolean forceNewLogFile,
296:                    long oldNodeLsn, int oldNodeSize,
297:                    boolean marshallOutsideLatch, ByteBuffer marshalledBuffer,
298:                    UtilizationTracker tracker, boolean shouldReplicate)
299:                    throws IOException, DatabaseException;
300:
301:            /**
302:             * Called within the log write critical section.
             *
             * Counts the old node obsolete, computes the entry size, bumps
             * the LSN, counts utilization for the new entry, copies the
             * marshalled entry into a pooled log buffer (or writes it
             * directly through a temporary buffer when it does not fit),
             * optionally hands it to the replicator, and on failure restores
             * the file position so the log has no hole.
             *
             * @param header pre-built header when marshallOutsideLatch is
             * true, else null (built here, inside the latch)
             * @param marshalledBuffer pre-marshalled entry bytes when
             * marshallOutsideLatch is true, else null
             * @return the new entry's LSN, its size, and whether the cleaner
             * should be woken
303:             */
304:            protected LogResult logInternal(LogEntryHeader header,
305:                    LogEntry item, boolean isProvisional,
306:                    boolean flushRequired, boolean forceNewLogFile,
307:                    long oldNodeLsn, int oldNodeSize,
308:                    boolean marshallOutsideLatch, ByteBuffer marshalledBuffer,
309:                    UtilizationTracker tracker, boolean shouldReplicate)
310:                    throws IOException, DatabaseException {
311:
312:                /*
313:                 * Do obsolete tracking before marshalling a FileSummaryLN into the log
314:                 * buffer so that a FileSummaryLN counts itself.  countObsoleteNode
315:                 * must be called before computing the entry size, since it can change
316:                 * the size of a FileSummaryLN entry that we're logging
317:                 */
318:                LogEntryType entryType = item.getLogType();
319:                if (oldNodeLsn != DbLsn.NULL_LSN) {
320:                    tracker.countObsoleteNode(oldNodeLsn, entryType,
321:                            oldNodeSize);
322:                }
323:
324:                /*
325:                 * If an item must be protected within the log write latch for
326:                 * marshalling, take care to also calculate its size in the protected
327:                 * section. Note that we have to get the size *before* marshalling so
328:                 * that the currentLsn and size are correct for utilization tracking.
329:                 */
330:                int entrySize;
331:                if (marshallOutsideLatch) {
332:                    entrySize = marshalledBuffer.limit();
333:                    assert header != null;
334:                } else {
335:                    assert header == null;
336:                    header = new LogEntryHeader(item, isProvisional,
337:                            shouldReplicate);
338:                    entrySize = header.getSize() + header.getItemSize();
339:                }
340:
341:                /*
342:                 * Get the next free slot in the log, under the log write latch.  Bump
343:                 * the LSN values, which gives us a valid previous pointer, which is
344:                 * part of the log entry header. That's why doing the checksum must be
345:                 * in the log write latch -- we need to bump the LSN first, and bumping
346:                 * the LSN must be done within the log write latch.
347:                 */
348:                if (forceNewLogFile) {
349:                    fileManager.forceNewLogFile();
350:                }
351:
352:                boolean flippedFile = fileManager.bumpLsn(entrySize);
353:                long currentLsn = DbLsn.NULL_LSN;
354:                boolean wakeupCleaner = false;
355:                boolean usedTemporaryBuffer = false;
356:                boolean success = false;
357:                try {
358:                    currentLsn = fileManager.getLastUsedLsn();
359:
360:                    /*
361:                     * countNewLogEntry and countObsoleteNodeInexact cannot change a
362:                     * FileSummaryLN size, so they are safe to call after
363:                     * getSizeForWrite.
364:                     */
365:                    wakeupCleaner = tracker.countNewLogEntry(currentLsn,
366:                            entryType, entrySize);
367:
368:                    /*
369:                     * LN deletions are obsolete immediately.  Inexact counting is
370:                     * used to save resources because the cleaner knows that all
371:                     * deleted LNs are obsolete.
372:                     */
373:                    if (item.countAsObsoleteWhenLogged()) {
374:                        tracker.countObsoleteNodeInexact(currentLsn, entryType,
375:                                entrySize);
376:                    }
377:
378:                    /*
379:                     * This item must be marshalled within the log write latch.
380:                     */
381:                    if (!marshallOutsideLatch) {
382:                        marshalledBuffer = marshallIntoBuffer(header, item,
383:                                isProvisional, shouldReplicate);
384:                    }
385:
386:                    /* Sanity check */
387:                    if (entrySize != marshalledBuffer.limit()) {
388:                        throw new DatabaseException("Logged item entrySize= "
389:                                + entrySize + " but marshalledSize="
390:                                + marshalledBuffer.limit() + " type="
391:                                + entryType + " currentLsn="
392:                                + DbLsn.getNoFormatString(currentLsn));
393:                    }
394:
395:                    /*
396:                     * Ask for a log buffer suitable for holding this new entry.  If
397:                     * the current log buffer is full, or if we flipped into a new
398:                     * file, write it to disk and get a new, empty log buffer to
399:                     * use. The returned buffer will be latched for write.
400:                     */
401:                    LogBuffer useLogBuffer = logBufferPool.getWriteBuffer(
402:                            entrySize, flippedFile);
403:
404:                    /* Add checksum, prev offset, VLSN to entry. */
405:                    marshalledBuffer = header.addPostMarshallingInfo(envImpl,
406:                            marshalledBuffer, fileManager.getPrevEntryOffset());
407:
408:                    /*
409:                     * If the LogBufferPool buffer (useBuffer) doesn't have sufficient
410:                     * space (since they're fixed size), just use the temporary buffer
411:                     * and throw it away when we're done.  That way we don't grow the
412:                     * LogBuffers in the pool permanently.  We risk an OOME on this
413:                     * temporary usage, but we'll risk it.  [#12674]
414:                     */
415:                    useLogBuffer.latchForWrite();
416:                    try {
417:                        ByteBuffer useBuffer = useLogBuffer.getDataBuffer();
418:                        if (useBuffer.capacity() - useBuffer.position() < entrySize) {
419:                            fileManager.writeLogBuffer(new LogBuffer(
420:                                    marshalledBuffer, currentLsn));
421:                            usedTemporaryBuffer = true;
422:                            assert useBuffer.position() == 0;
423:                            nTempBufferWrites++;
424:                        } else {
425:                            /* Copy marshalled object into write buffer. */
426:                            useBuffer.put(marshalledBuffer);
427:                        }
428:                    } finally {
429:                        useLogBuffer.release();
430:                    }
431:
432:                    /*
433:                     * If this is a replicated log entry and this site is part of a
434:                     * replication group, send this operation to other sites.
435:                     * The replication logic takes care of deciding whether this site
436:                     * is a master.
437:                     */
438:                    if (shouldReplicate) {
439:                        envImpl.getReplicator().replicateOperation(
440:                                Operation.PLACEHOLDER, marshalledBuffer);
441:                    }
442:                    success = true;
443:                } finally {
444:                    if (!success) {
445:
446:                        /*
447:                         * The LSN pointer, log buffer position, and corresponding file
448:                         * position march in lockstep.
449:                         *
450:                         * 1. We bump the LSN.
451:                         * 2. We copy loggable item into the log buffer.
452:                         * 3. We may try to write the log buffer.
453:                         *
454:                         * If we've failed to put the item into the log buffer (2), we
455:                         * need to restore old LSN state so that the log buffer doesn't
456:                         * have a hole. [SR #12638] If we fail after (2), we don't need
457:                         * to restore state, because log buffers will still match file
458:                         * positions.
459:                         */
460:                        fileManager.restoreLastPosition();
461:                    }
462:                }
463:
464:                /*
465:                 * Tell the log buffer pool that we finished the write.  Record the
466:                 * LSN against this logbuffer, and write the buffer to disk if
467:                 * needed.
468:                 */
469:                if (!usedTemporaryBuffer) {
470:                    logBufferPool.writeCompleted(currentLsn, flushRequired);
471:                }
472:
473:                /*
474:                 * If the txn is not null, the first item is an LN. Update the txn with
475:                 * info about the latest LSN. Note that this has to happen within the
476:                 * log write latch.
477:                 */
478:                item.postLogWork(currentLsn);
479:
480:                return new LogResult(currentLsn, wakeupCleaner, entrySize);
481:            }
482:
483:            /**
484:             * Serialize a loggable object into this buffer.
485:             */
486:            private ByteBuffer marshallIntoBuffer(LogEntryHeader header,
487:                    LogEntry item, boolean isProvisional,
488:                    boolean shouldReplicate) throws DatabaseException {
489:
490:                int entrySize = header.getSize() + header.getItemSize();
491:
492:                ByteBuffer destBuffer = ByteBuffer.allocate(entrySize);
493:                header.writeToLog(destBuffer);
494:
495:                /* Put the entry in. */
496:                item.writeEntry(header, destBuffer);
497:
498:                /* Some entries (LNs) save the last logged size. */
499:                item.setLastLoggedSize(entrySize);
500:
501:                /* Set the limit so it can be used as the size of the entry. */
502:                destBuffer.flip();
503:
504:                return destBuffer;
505:            }
506:
507:            /**
508:             * Serialize a log entry into this buffer with proper entry header. Return
509:             * it ready for a copy.
510:             */
511:            ByteBuffer putIntoBuffer(LogEntry item, long prevLogEntryOffset)
512:                    throws DatabaseException {
513:
514:                LogEntryHeader header = new LogEntryHeader(item, false, // isProvisional,
515:                        false); // shouldReplicate
516:
517:                ByteBuffer destBuffer = marshallIntoBuffer(header, item, false, // isProvisional
518:                        false); // shouldReplicate
519:
520:                return header.addPostMarshallingInfo(envImpl, destBuffer, 0); // lastOffset
521:            }
522:
523:            /*
524:             * Reading from the log.
525:             */
526:
527:            /**
528:             * Instantiate all the objects in the log entry at this LSN.
529:             * @param lsn location of entry in log.
530:             * @return log entry that embodies all the objects in the log entry.
531:             */
532:            public LogEntry getLogEntry(long lsn) throws DatabaseException {
533:
534:                /*
535:                 * Fail loudly if the environment is invalid.  A RunRecoveryException
536:                 * must have occurred.
537:                 */
538:                envImpl.checkIfInvalid();
539:
540:                /*
541:                 * Get a log source for the log entry which provides an abstraction
542:                 * that hides whether the entry is in a buffer or on disk. Will
543:                 * register as a reader for the buffer or the file, which will take a
544:                 * latch if necessary.
545:                 */
546:                LogSource logSource = getLogSource(lsn);
547:
548:                /* Read the log entry from the log source. */
549:                return getLogEntryFromLogSource(lsn, logSource);
550:            }
551:
552:            LogEntry getLogEntry(long lsn, RandomAccessFile file)
553:                    throws DatabaseException {
554:
555:                return getLogEntryFromLogSource(lsn, new FileSource(file,
556:                        readBufferSize, fileManager));
557:            }
558:
559:            /**
560:             * Instantiate all the objects in the log entry at this LSN. This will
561:             * release the log source at the first opportunity.
562:             *
563:             * @param lsn location of entry in log
564:             * @return log entry that embodies all the objects in the log entry
565:             */
566:            private LogEntry getLogEntryFromLogSource(long lsn,
567:                    LogSource logSource) throws DatabaseException {
568:
569:                try {
570:
571:                    /*
572:                     * Read the log entry header into a byte buffer. This assumes
573:                     * that the minimum size of this byte buffer (determined by
574:                     * je.log.faultReadSize) is always >= the maximum log entry header.
575:                     */
576:                    long fileOffset = DbLsn.getFileOffset(lsn);
577:                    ByteBuffer entryBuffer = logSource.getBytes(fileOffset);
578:                    assert ((entryBuffer.limit() - entryBuffer.position()) >= LogEntryHeader.MAX_HEADER_SIZE);
579:
580:                    /* Read the header */
581:                    LogEntryHeader header = new LogEntryHeader(envImpl,
582:                            entryBuffer, false); //anticipateChecksumErrors
583:                    header.readVariablePortion(entryBuffer);
584:
585:                    ChecksumValidator validator = null;
586:                    if (doChecksumOnRead) {
587:                        /* Add header to checksum bytes */
588:                        validator = new ChecksumValidator();
589:                        int headerSizeMinusChecksum = header
590:                                .getSizeMinusChecksum();
591:                        int itemStart = entryBuffer.position();
592:                        entryBuffer.position(itemStart
593:                                - headerSizeMinusChecksum);
594:                        validator.update(envImpl, entryBuffer,
595:                                headerSizeMinusChecksum, false); // anticipateChecksumErrors
596:                        entryBuffer.position(itemStart);
597:                    }
598:
599:                    /*
600:                     * Now that we know the size, read the rest of the entry
601:                     * if the first read didn't get enough.
602:                     */
603:                    int itemSize = header.getItemSize();
604:                    if (entryBuffer.remaining() < itemSize) {
605:                        entryBuffer = logSource.getBytes(fileOffset
606:                                + header.getSize(), itemSize);
607:                        nRepeatFaultReads++;
608:                    }
609:
610:                    /*
611:                     * Do entry validation. Run checksum before checking the entry
612:                     * type, it will be the more encompassing error.
613:                     */
614:                    if (doChecksumOnRead) {
615:                        /* Check the checksum first. */
616:                        validator.update(envImpl, entryBuffer, itemSize, false);
617:                        validator.validate(envImpl, header.getChecksum(), lsn);
618:                    }
619:
620:                    assert LogEntryType.isValidType(header.getType()) : "Read non-valid log entry type: "
621:                            + header.getType();
622:
623:                    /* Read the entry. */
624:                    LogEntry logEntry = LogEntryType.findType(header.getType(),
625:                            header.getVersion()).getNewLogEntry();
626:                    logEntry.readEntry(header, entryBuffer, true); // readFullItem
627:
628:                    /* Some entries (LNs) save the last logged size. */
629:                    logEntry.setLastLoggedSize(itemSize + header.getSize());
630:
631:                    /* For testing only; generate a read io exception. */
632:                    if (readHook != null) {
633:                        readHook.doIOHook();
634:                    }
635:
636:                    /*
637:                     * Done with the log source, release in the finally clause.  Note
638:                     * that the buffer we get back from logSource is just a duplicated
639:                     * buffer, where the position and state are copied but not the
640:                     * actual data. So we must not release the logSource until we are
641:                     * done marshalling the data from the buffer into the object
642:                     * itself.
643:                     */
644:                    return logEntry;
645:                } catch (DatabaseException e) {
646:
647:                    /*
648:                     * Propagate DatabaseExceptions, we want to preserve any subtypes
649:                     * for downstream handling.
650:                     */
651:                    throw e;
652:                } catch (ClosedChannelException e) {
653:
654:                    /*
655:                     * The channel should never be closed. It may be closed because
656:                     * of an interrupt received by another thread. See SR [#10463]
657:                     */
658:                    throw new RunRecoveryException(envImpl,
659:                            "Channel closed, may be "
660:                                    + "due to thread interrupt", e);
661:                } catch (Exception e) {
662:                    throw new DatabaseException(e);
663:                } finally {
664:                    if (logSource != null) {
665:                        logSource.release();
666:                    }
667:                }
668:            }
669:
670:            /**
671:             * Fault in the first object in the log entry log entry at this LSN.
672:             * @param lsn location of object in log
673:             * @return the object in the log
674:             */
675:            public Object get(long lsn) throws DatabaseException {
676:
677:                LogEntry entry = getLogEntry(lsn);
678:                return entry.getMainItem();
679:            }
680:
681:            /**
682:             * Find the LSN, whether in a file or still in the log buffers.
683:             * Is public for unit testing.
684:             */
685:            public LogSource getLogSource(long lsn) throws DatabaseException {
686:
687:                /*
688:                 * First look in log to see if this LSN is still in memory.
689:                 */
690:                LogBuffer logBuffer = logBufferPool.getReadBuffer(lsn);
691:
692:                if (logBuffer == null) {
693:                    try {
694:                        /* Not in the in-memory log -- read it off disk. */
695:                        return new FileHandleSource(fileManager
696:                                .getFileHandle(DbLsn.getFileNumber(lsn)),
697:                                readBufferSize, fileManager);
698:                    } catch (LogFileNotFoundException e) {
699:                        /* Add LSN to exception message. */
700:                        throw new LogFileNotFoundException(DbLsn
701:                                .getNoFormatString(lsn)
702:                                + ' ' + e.getMessage());
703:                    }
704:                } else {
705:                    return logBuffer;
706:                }
707:            }
708:
709:            /**
710:             * Flush all log entries, fsync the log file.
711:             */
712:            public void flush() throws DatabaseException {
713:
714:                if (!readOnly) {
715:                    flushInternal();
716:                    fileManager.syncLogEnd();
717:                }
718:            }
719:
720:            /**
721:             * May be used to avoid sync to speed unit tests.
722:             */
723:            public void flushNoSync() throws DatabaseException {
724:
725:                if (!readOnly) {
726:                    flushInternal();
727:                }
728:            }
729:
    /**
     * Write unwritten log entries to the file system without fsync'ing;
     * flush() adds the fsync on top of this.  Implementations differ in
     * how they latch -- presumably the log write latch; confirm against
     * the concrete subclasses.
     */
    abstract protected void flushInternal() throws LogException,
            DatabaseException;
732:
733:            public void loadStats(StatsConfig config, EnvironmentStats stats)
734:                    throws DatabaseException {
735:
736:                stats.setNRepeatFaultReads(nRepeatFaultReads);
737:                stats.setNTempBufferWrites(nTempBufferWrites);
738:                if (config.getClear()) {
739:                    nRepeatFaultReads = 0;
740:                    nTempBufferWrites = 0;
741:                }
742:
743:                logBufferPool.loadStats(config, stats);
744:                fileManager.loadStats(config, stats);
745:                if (!config.getFast()) {
746:                    loadEndOfLogStat(stats);
747:                }
748:            }
749:
750:            /**
751:             * Returns a tracked summary for the given file which will not be flushed.
752:             * Used for watching changes that occur while a file is being cleaned.
753:             */
754:            abstract public TrackedFileSummary getUnflushableTrackedSummary(
755:                    long file) throws DatabaseException;
756:
757:            protected TrackedFileSummary getUnflushableTrackedSummaryInternal(
758:                    long file) throws DatabaseException {
759:
760:                return envImpl.getUtilizationTracker()
761:                        .getUnflushableTrackedSummary(file);
762:            }
763:
764:            /**
765:             * Removes the tracked summary for the given file.
766:             */
767:            abstract public void removeTrackedFile(TrackedFileSummary tfs)
768:                    throws DatabaseException;
769:
    /* Shared implementation: discard the summary's tracked state. */
    protected void removeTrackedFileInternal(TrackedFileSummary tfs) {
        tfs.reset();
    }
773:
774:            /**
775:             * Count node as obsolete under the log write latch.  This is done here
776:             * because the log write latch is managed here, and all utilization
777:             * counting must be performed under the log write latch.
778:             */
779:            abstract public void countObsoleteNode(long lsn, LogEntryType type,
780:                    int size) throws DatabaseException;
781:
    /* Shared implementation: delegate obsolete counting to the tracker. */
    protected void countObsoleteNodeInternal(
            UtilizationTracker tracker, long lsn, LogEntryType type,
            int size) throws DatabaseException {

        tracker.countObsoleteNode(lsn, type, size);
    }
788:
789:            /**
790:             * Counts file summary info under the log write latch.
791:             */
792:            abstract public void countObsoleteNodes(
793:                    TrackedFileSummary[] summaries) throws DatabaseException;
794:
795:            protected void countObsoleteNodesInternal(
796:                    UtilizationTracker tracker, TrackedFileSummary[] summaries)
797:                    throws DatabaseException {
798:
799:                for (int i = 0; i < summaries.length; i += 1) {
800:                    TrackedFileSummary summary = summaries[i];
801:                    tracker.addSummary(summary.getFileNumber(), summary);
802:                }
803:            }
804:
805:            /**
806:             * Counts the given obsolete IN LSNs under the log write latch.
807:             */
808:            abstract public void countObsoleteINs(List lsnList)
809:                    throws DatabaseException;
810:
811:            protected void countObsoleteINsInternal(List lsnList)
812:                    throws DatabaseException {
813:
814:                UtilizationTracker tracker = envImpl.getUtilizationTracker();
815:
816:                for (int i = 0; i < lsnList.size(); i += 1) {
817:                    Long offset = (Long) lsnList.get(i);
818:                    tracker.countObsoleteNode(offset.longValue(),
819:                            LogEntryType.LOG_IN, 0);
820:                }
821:            }
822:
    /**
     * Records the end-of-log position in stats; only invoked by
     * loadStats for a non-fast statistics read.
     */
    public abstract void loadEndOfLogStat(EnvironmentStats stats)
            throws DatabaseException;
825:
    /* Shared implementation: report the file manager's last-used LSN. */
    void loadEndOfLogStatInternal(EnvironmentStats stats) {
        stats.setEndOfLog(fileManager.getLastUsedLsn());
    }
829:
    /*
     * For unit testing only: installs the hook fired by getLogEntry to
     * simulate a read I/O failure.
     */
    public void setReadHook(TestHook hook) {
        readHook = hook;
    }
834:
835:            /**
836:             * LogResult holds the multivalue return from logInternal.
837:             */
838:            static class LogResult {
839:                long currentLsn;
840:                boolean wakeupCleaner;
841:                int entrySize;
842:
843:                LogResult(long currentLsn, boolean wakeupCleaner, int entrySize) {
844:                    this.currentLsn = currentLsn;
845:                    this.wakeupCleaner = wakeupCleaner;
846:                    this.entrySize = entrySize;
847:                }
848:            }
849:        }
www.java2java.com | Contact Us
Copyright 2009 - 12 Demo Source and Support. All rights reserved.
All other trademarks are property of their respective owners.