Source Code Cross Referenced for Cleaner.java in » JMX » je » com.sleepycat.je.cleaner » Java Source Code / Java Documentation

0001:        /*-
0002:         * See the file LICENSE for redistribution information.
0003:         *
0004:         * Copyright (c) 2002,2008 Oracle.  All rights reserved.
0005:         *
0006:         * $Id: Cleaner.java,v 1.183.2.9 2008/01/07 15:14:08 cwl Exp $
0007:         */
0008:
0009:        package com.sleepycat.je.cleaner;
0010:
0011:        import java.io.IOException;
0012:        import java.util.Arrays;
0013:        import java.util.Collections;
0014:        import java.util.Comparator;
0015:        import java.util.Iterator;
0016:        import java.util.Set;
0017:        import java.util.logging.Level;
0018:        import java.util.logging.Logger;
0019:
0020:        import com.sleepycat.je.DatabaseException;
0021:        import com.sleepycat.je.EnvironmentStats;
0022:        import com.sleepycat.je.StatsConfig;
0023:        import com.sleepycat.je.cleaner.FileSelector.CheckpointStartCleanerState;
0024:        import com.sleepycat.je.config.EnvironmentParams;
0025:        import com.sleepycat.je.dbi.DatabaseId;
0026:        import com.sleepycat.je.dbi.DatabaseImpl;
0027:        import com.sleepycat.je.dbi.DbConfigManager;
0028:        import com.sleepycat.je.dbi.DbTree;
0029:        import com.sleepycat.je.dbi.EnvConfigObserver;
0030:        import com.sleepycat.je.dbi.EnvironmentImpl;
0031:        import com.sleepycat.je.log.FileManager;
0032:        import com.sleepycat.je.tree.BIN;
0033:        import com.sleepycat.je.tree.ChildReference;
0034:        import com.sleepycat.je.tree.DIN;
0035:        import com.sleepycat.je.tree.LN;
0036:        import com.sleepycat.je.tree.Node;
0037:        import com.sleepycat.je.tree.Tree;
0038:        import com.sleepycat.je.tree.TreeLocation;
0039:        import com.sleepycat.je.txn.BasicLocker;
0040:        import com.sleepycat.je.txn.LockGrantType;
0041:        import com.sleepycat.je.txn.LockResult;
0042:        import com.sleepycat.je.txn.LockType;
0043:        import com.sleepycat.je.utilint.DaemonRunner;
0044:        import com.sleepycat.je.utilint.DbLsn;
0045:        import com.sleepycat.je.utilint.PropUtil;
0046:        import com.sleepycat.je.utilint.Tracer;
0047:
0048:        /**
0049:         * The Cleaner is responsible for effectively garbage collecting the JE log.
0050:         * It looks through log files and locates log records (INs and LNs of all
0051:         * flavors) that are superseded by later versions.  Those that are "current"
0052:         * are propagated to a newer log file so that older log files can be deleted.
0053:         */
0054:        public class Cleaner implements DaemonRunner, EnvConfigObserver {
0055:            /* From cleaner */
0056:            static final String CLEAN_IN = "CleanIN:";
0057:            static final String CLEAN_LN = "CleanLN:";
0058:            static final String CLEAN_MIGRATE_LN = "CleanMigrateLN:";
0059:            static final String CLEAN_PENDING_LN = "CleanPendingLN:";
0060:
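            /*
             * Illustrative usage sketch (not part of the original source): the
             * cleaning cycle described in the class comment above is normally
             * driven by the cleaner daemon threads, whose behavior is tuned
             * through standard JE configuration properties.  The property names
             * below are the usual JE parameter names; the values and the
             * envHome variable are placeholders.
             *
             *     EnvironmentConfig config = new EnvironmentConfig();
             *     config.setAllowCreate(true);
             *     // Clean a log file once its utilization drops below 50%.
             *     config.setConfigParam("je.cleaner.minUtilization", "50");
             *     // Number of cleaner daemon threads (CLEANER_THREADS below).
             *     config.setConfigParam("je.cleaner.threads", "2");
             *     Environment env = new Environment(envHome, config);
             */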
0061:            /**
0062:             * Whether to fetch LNs for files in the to-be-cleaned set during lazy
0063:             * migration.  This is currently enabled because we do not support the
0064:             * dynamic addition of cleaner threads; that way, if the configured cleaner
0065:             * threads cannot keep up, we use proactive migration to keep up.
0066:             */
0067:            static final boolean PROACTIVE_MIGRATION = true;
0068:
0069:            /**
0070:             * Whether to update the IN generation count during searches.  This is
0071:             * currently disabled because 1) we update the generation of the BIN when
0072:             * we set a MIGRATE flag and 2) if the BIN is not evicted its parents will
0073:             * not be, so not updating the generation during the search has no benefit.
0074:             * By not updating the generation during searches for which we do NOT set
0075:             * the MIGRATE flag, we avoid holding INs in the cache that are not needed
0076:             * for lazy migration.  However, we do very few searches for obsolete LNs
0077:             * because the obsolete tracking info prevents this, so the benefit of not
0078:             * updating the generation during searches is questionable.  In other
0079:             * words, changing this setting will have little effect.
0080:             */
0081:            static final boolean UPDATE_GENERATION = false;
0082:
0083:            /**
0084:             * Whether the cleaner should participate in critical eviction.  Ideally
0085:             * the cleaner would not participate in eviction, since that would reduce
0086:             * the cost of cleaning.  However, the cleaner can add large numbers of
0087:             * nodes to the cache.  By not participating in eviction, other threads
0088:             * could be kept in a constant state of eviction and would effectively
0089:             * starve.  Therefore, this setting is currently enabled.
0090:             */
0091:            static final boolean DO_CRITICAL_EVICTION = true;
0092:
0093:            /*
0094:             * Cumulative counters.  Updates to these counters occur in multiple
0095:             * threads, including FileProcessor threads, and are not synchronized.
0096:             * This could produce errors in counting, but avoids contention around stat
0097:             * updates.
0098:             */
0099:            int nBacklogFiles = 0;
0100:            int nCleanerRuns = 0;
0101:            int nCleanerDeletions = 0;
0102:            int nINsObsolete = 0;
0103:            int nINsCleaned = 0;
0104:            int nINsDead = 0;
0105:            int nINsMigrated = 0;
0106:            int nLNsObsolete = 0;
0107:            int nLNsCleaned = 0;
0108:            int nLNsDead = 0;
0109:            int nLNsLocked = 0;
0110:            int nLNsMigrated = 0;
0111:            int nLNsMarked = 0;
0112:            int nLNQueueHits = 0;
0113:            int nPendingLNsProcessed = 0;
0114:            int nMarkedLNsProcessed = 0;
0115:            int nToBeCleanedLNsProcessed = 0;
0116:            int nClusterLNsProcessed = 0;
0117:            int nPendingLNsLocked = 0;
0118:            int nEntriesRead = 0;
0119:            long nRepeatIteratorReads = 0;
0120:
0121:            /*
0122:             * Configuration parameters are non-private for use by FileProcessor,
0123:             * UtilizationTracker.
0124:             */
0125:            long lockTimeout;
0126:            int readBufferSize;
0127:            int lookAheadCacheSize;
0128:            int nDeadlockRetries;
0129:            boolean expunge;
0130:            boolean clusterResident;
0131:            boolean clusterAll;
0132:            int maxBatchFiles;
0133:            Level detailedTraceLevel;
0134:            long cleanerBytesInterval;
0135:            boolean trackDetail;
0136:            boolean fetchObsoleteSize;
0137:
0138:            /**
0139:             * All files that are to-be-cleaned or being-cleaned.  Used to perform
0140:             * proactive migration.  Is read-only after assignment, so no
0141:             * synchronization is needed.
0142:             */
0143:            Set mustBeCleanedFiles = Collections.EMPTY_SET;
0144:
0145:            /**
0146:             * All files that are below the minUtilization threshold.  Used to perform
0147:             * clustering migration.  Is read-only after assignment, so no
0148:             * synchronization is needed.
0149:             */
0150:            Set lowUtilizationFiles = Collections.EMPTY_SET;
0151:
0152:            private String name;
0153:            private EnvironmentImpl env;
0154:            private UtilizationProfile profile;
0155:            private UtilizationTracker tracker;
0156:            private FileSelector fileSelector;
0157:            private FileProcessor[] threads;
0158:
0159:            /*
0160:             * Log file deletion must check for the presence of read/only processes
0161:             * and ongoing backups.
0162:             */
0163:            private Object deleteFileLock;
0164:            private boolean deleteProhibited; // protected by deleteFileLock
0165:
0166:            public Cleaner(EnvironmentImpl env, String name)
0167:                    throws DatabaseException {
0168:
0169:                this.env = env;
0170:                this.name = name;
0171:                tracker = new UtilizationTracker(env, this);
0172:                profile = new UtilizationProfile(env, tracker);
0173:                fileSelector = new FileSelector();
0174:                threads = new FileProcessor[0];
0175:                deleteFileLock = new Object();
0176:
0177:                /*
0178:                 * The trackDetail property is immutable because of the complexity (if
0179:                 * it were mutable) in determining whether to update the memory budget
0180:                 * and perform eviction.
0181:                 */
0182:                trackDetail = env.getConfigManager().getBoolean(
0183:                        EnvironmentParams.CLEANER_TRACK_DETAIL);
0184:
0185:                /* Initialize mutable properties and register for notifications. */
0186:                envConfigUpdate(env.getConfigManager());
0187:                env.addConfigObserver(this);
0188:            }
0189:
0190:            /**
0191:             * Process notifications of mutable property changes.
0192:             */
0193:            public void envConfigUpdate(DbConfigManager cm)
0194:                    throws DatabaseException {
0195:
0196:                lockTimeout = PropUtil.microsToMillis(cm
0197:                        .getLong(EnvironmentParams.CLEANER_LOCK_TIMEOUT));
0198:
0199:                readBufferSize = cm.getInt(EnvironmentParams.CLEANER_READ_SIZE);
0200:                if (readBufferSize <= 0) {
0201:                    readBufferSize = cm
0202:                            .getInt(EnvironmentParams.LOG_ITERATOR_READ_SIZE);
0203:                }
0204:
0205:                lookAheadCacheSize = cm
0206:                        .getInt(EnvironmentParams.CLEANER_LOOK_AHEAD_CACHE_SIZE);
0207:
0208:                nDeadlockRetries = cm
0209:                        .getInt(EnvironmentParams.CLEANER_DEADLOCK_RETRY);
0210:
0211:                expunge = cm.getBoolean(EnvironmentParams.CLEANER_REMOVE);
0212:
0213:                clusterResident = cm
0214:                        .getBoolean(EnvironmentParams.CLEANER_CLUSTER);
0215:
0216:                clusterAll = cm
0217:                        .getBoolean(EnvironmentParams.CLEANER_CLUSTER_ALL);
0218:
0219:                maxBatchFiles = cm
0220:                        .getInt(EnvironmentParams.CLEANER_MAX_BATCH_FILES);
0221:
0222:                detailedTraceLevel = Tracer.parseLevel(env,
0223:                        EnvironmentParams.JE_LOGGING_LEVEL_CLEANER);
0224:
0225:                if (clusterResident && clusterAll) {
0226:                    throw new IllegalArgumentException("Both "
0227:                            + EnvironmentParams.CLEANER_CLUSTER + " and "
0228:                            + EnvironmentParams.CLEANER_CLUSTER_ALL
0229:                            + " may not be set to true.");
0230:                }
0231:
0232:                int nThreads = cm.getInt(EnvironmentParams.CLEANER_THREADS);
0233:                assert nThreads > 0;
0234:
0235:                if (nThreads != threads.length) {
0236:
0237:                    /* Shutdown threads when reducing their number. */
0238:                    for (int i = nThreads; i < threads.length; i += 1) {
0239:                        if (threads[i] != null) {
0240:                            threads[i].shutdown();
0241:                            threads[i] = null;
0242:                        }
0243:                    }
0244:
0245:                    /* Copy existing threads that are still used. */
0246:                    FileProcessor[] newThreads = new FileProcessor[nThreads];
0247:                    for (int i = 0; i < nThreads && i < threads.length; i += 1) {
0248:                        newThreads[i] = threads[i];
0249:                    }
0250:
0251:                    /* Don't lose track of new threads if an exception occurs. */
0252:                    threads = newThreads;
0253:
0254:                    /* Start new threads when increasing their number. */
0255:                    for (int i = 0; i < nThreads; i += 1) {
0256:                        if (threads[i] == null) {
0257:                            threads[i] = new FileProcessor(
0258:                                    name + '-' + (i + 1), env, this, profile,
0259:                                    fileSelector);
0260:                        }
0261:                    }
0262:                }
0263:
0264:                cleanerBytesInterval = cm
0265:                        .getLong(EnvironmentParams.CLEANER_BYTES_INTERVAL);
0266:                if (cleanerBytesInterval == 0) {
0267:                    cleanerBytesInterval = cm
0268:                            .getLong(EnvironmentParams.LOG_FILE_MAX) / 4;
0269:                }
0270:
0271:                fetchObsoleteSize = cm
0272:                        .getBoolean(EnvironmentParams.CLEANER_FETCH_OBSOLETE_SIZE);
0273:            }
0274:
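            /*
             * Hypothetical sketch of how the mutable-property path above is
             * exercised from application code: changing je.cleaner.threads on
             * a live environment causes envConfigUpdate() to run again, which
             * shrinks or grows the FileProcessor array as shown above.  The
             * variable env is assumed to be an open Environment.
             *
             *     EnvironmentMutableConfig mutable = env.getMutableConfig();
             *     mutable.setConfigParam("je.cleaner.threads", "4");
             *     env.setMutableConfig(mutable); // observers, including this Cleaner, are notified
             */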
0275:            public UtilizationTracker getUtilizationTracker() {
0276:                return tracker;
0277:            }
0278:
0279:            public UtilizationProfile getUtilizationProfile() {
0280:                return profile;
0281:            }
0282:
0283:            public boolean getFetchObsoleteSize() {
0284:                return fetchObsoleteSize;
0285:            }
0286:
0287:            /*
0288:             * Delegate the run/pause/wakeup/shutdown DaemonRunner operations.  We
0289:             * always check for null to account for the possibility of exceptions
0290:             * during thread creation.  Cleaner daemon can't ever be run if No Locking
0291:             * mode is enabled.
0292:             */
0293:            public void runOrPause(boolean run) {
0294:                if (!env.isNoLocking()) {
0295:                    for (int i = 0; i < threads.length; i += 1) {
0296:                        FileProcessor processor = threads[i];
0297:                        if (processor != null) {
0298:
0299:                            /*
0300:                             * When the cleaner is set to run, we need to wake up the
0301:                             * thread immediately since there may be a backlog of files
0302:                             * to clean.  But we must not block here if a file is
0303:                             * currently being processed.  Therefore we force a wakeup
0304:                             * by adding a work item.  This functionality may
0305:                             * eventually be moved to DaemonThread since it applies to
0306:                             * other daemons.  [#15158]
0307:                             */
0308:                            if (run) {
0309:                                processor.addSentinalWorkObject();
0310:                            }
0311:                            processor.runOrPause(run);
0312:                        }
0313:                    }
0314:                }
0315:            }
0316:
0317:            public void wakeup() {
0318:                for (int i = 0; i < threads.length; i += 1) {
0319:                    if (threads[i] != null) {
0320:                        threads[i].wakeup();
0321:                    }
0322:                }
0323:            }
0324:
0325:            public void requestShutdown() {
0326:                for (int i = 0; i < threads.length; i += 1) {
0327:                    if (threads[i] != null) {
0328:                        threads[i].requestShutdown();
0329:                    }
0330:                }
0331:            }
0332:
0333:            public void shutdown() {
0334:                for (int i = 0; i < threads.length; i += 1) {
0335:                    if (threads[i] != null) {
0336:                        threads[i].shutdown();
0337:                        threads[i].clearEnv();
0338:                        threads[i] = null;
0339:                    }
0340:                }
0341:            }
0342:
0343:            public int getNWakeupRequests() {
0344:                int count = 0;
0345:                for (int i = 0; i < threads.length; i += 1) {
0346:                    if (threads[i] != null) {
0347:                        count += threads[i].getNWakeupRequests();
0348:                    }
0349:                }
0350:                return count;
0351:            }
0352:
0353:            private boolean areThreadsRunning() {
0354:                for (int i = 0; i < threads.length; i += 1) {
0355:                    if (threads[i] != null) {
0356:                        return threads[i].isRunning();
0357:                    }
0358:                }
0359:                return false;
0360:            }
0361:
0362:            /**
0363:             * Cleans selected files and returns the number of files cleaned.  This
0364:             * method is not invoked by a daemon thread; it is invoked programmatically.
0365:             *
0366:             * @param cleanMultipleFiles is true to clean until we're under budget,
0367:             * or false to clean at most one file.
0368:             *
0369:             * @param forceCleaning is true to clean even if we're not under the
0370:             * utilization threshold.
0371:             *
0372:             * @return the number of files cleaned, not including files cleaned
0373:             * unsuccessfully.
0374:             */
0375:            public int doClean(boolean cleanMultipleFiles, boolean forceCleaning)
0376:                    throws DatabaseException {
0377:
0378:                FileProcessor processor = new FileProcessor("", env, this,
0379:                        profile, fileSelector);
0380:                return processor.doClean(false /*invokedFromDaemon*/,
0381:                        cleanMultipleFiles, forceCleaning);
0382:            }
0383:
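            /*
             * Sketch of the usual caller-side pattern (an assumption about how
             * doClean() is reached, via the public Environment.cleanLog()
             * entry point): clean a quiescent environment in a loop, then
             * force a checkpoint so the cleaned files become safe to delete.
             *
             *     int cleaned;
             *     while ((cleaned = env.cleanLog()) > 0) {
             *         // keep cleaning until no file qualifies
             *     }
             *     CheckpointConfig force = new CheckpointConfig();
             *     force.setForce(true);
             *     env.checkpoint(force);
             *     env.close();
             */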
0384:            /**
0385:             * Load stats.
0386:             */
0387:            public void loadStats(StatsConfig config, EnvironmentStats stat)
0388:                    throws DatabaseException {
0389:
0390:                stat.setCleanerBacklog(nBacklogFiles);
0391:                stat.setNCleanerRuns(nCleanerRuns);
0392:                stat.setNCleanerDeletions(nCleanerDeletions);
0393:                stat.setNINsObsolete(nINsObsolete);
0394:                stat.setNINsCleaned(nINsCleaned);
0395:                stat.setNINsDead(nINsDead);
0396:                stat.setNINsMigrated(nINsMigrated);
0397:                stat.setNLNsObsolete(nLNsObsolete);
0398:                stat.setNLNsCleaned(nLNsCleaned);
0399:                stat.setNLNsDead(nLNsDead);
0400:                stat.setNLNsLocked(nLNsLocked);
0401:                stat.setNLNsMigrated(nLNsMigrated);
0402:                stat.setNLNsMarked(nLNsMarked);
0403:                stat.setNLNQueueHits(nLNQueueHits);
0404:                stat.setNPendingLNsProcessed(nPendingLNsProcessed);
0405:                stat.setNMarkedLNsProcessed(nMarkedLNsProcessed);
0406:                stat.setNToBeCleanedLNsProcessed(nToBeCleanedLNsProcessed);
0407:                stat.setNClusterLNsProcessed(nClusterLNsProcessed);
0408:                stat.setNPendingLNsLocked(nPendingLNsLocked);
0409:                stat.setNCleanerEntriesRead(nEntriesRead);
0410:                stat.setNRepeatIteratorReads(nRepeatIteratorReads);
0411:                stat.setTotalLogSize(profile.getTotalLogSize());
0412:
0413:                if (config.getClear()) {
0414:                    nCleanerRuns = 0;
0415:                    nCleanerDeletions = 0;
0416:                    nINsObsolete = 0;
0417:                    nINsCleaned = 0;
0418:                    nINsDead = 0;
0419:                    nINsMigrated = 0;
0420:                    nLNsObsolete = 0;
0421:                    nLNsCleaned = 0;
0422:                    nLNsDead = 0;
0423:                    nLNsLocked = 0;
0424:                    nLNsMigrated = 0;
0425:                    nLNsMarked = 0;
0426:                    nLNQueueHits = 0;
0427:                    nPendingLNsProcessed = 0;
0428:                    nMarkedLNsProcessed = 0;
0429:                    nToBeCleanedLNsProcessed = 0;
0430:                    nClusterLNsProcessed = 0;
0431:                    nPendingLNsLocked = 0;
0432:                    nEntriesRead = 0;
0433:                    nRepeatIteratorReads = 0;
0434:                }
0435:            }
0436:
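            /*
             * Sketch of how these counters surface to applications.  The
             * getter names are assumed to mirror the setters used in
             * loadStats() above; env is an open Environment.
             *
             *     StatsConfig statsConfig = new StatsConfig();
             *     statsConfig.setClear(true); // reset counters, handled by config.getClear() above
             *     EnvironmentStats stats = env.getStats(statsConfig);
             *     int backlog = stats.getCleanerBacklog(); // corresponds to nBacklogFiles
             */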
0437:            /**
0438:             * For unit testing.
0439:             */
0440:            void injectFileForCleaning(Long fileNum) {
0441:                fileSelector.putBackFileForCleaning(fileNum);
0442:            }
0443:
0444:            /**
0445:             * Deletes all files that are safe-to-delete, if there are no read/only
0446:             * processes and concurrent backups.
0447:             *
0448:             * Deletion is coordinated by the synchronization variable deleteFileLock
0449:             * AND by the deleteProhibited state variable. The reason that two
0450:             * different mechanisms are used is that file deletion must be prevented
0451:             * both inter- and intra-process. File locks must be used for inter-process,
0452:             * and the state bit for intra-process.
0453:             *
0454:             * To guard against read/only processes, the would-be deleter tries to get
0455:             * an exclusive lock on the environment. This will not be possible if a
0456:             * read/only process exists.
0457:             *
0458:             * To guard against backup mode, the would-be deleter checks the
0459:             * deleteProhibited state. Backup and file deletion can only be carried out
0460:             * by a read/write process, so both activities are working in the same
0461:             * process. Note that file locks are not supported intra-process. The
0462:             * deleteProhibited state is used rather than a simple synchronization on
0463:             * deleteFileLock because the start/endBackup is controlled by the
0464:             * application, and the copying of log files can take an arbitrarily long
0465:             * time. Using synchronization on deleteFileLock would make it possible to
0466:             * lock out a cleaner thread for an unacceptable amount of time.
0467:             */
0468:            void deleteSafeToDeleteFiles() throws DatabaseException {
0469:
0470:                /*
0471:                 * Synchronized to prevent multiple threads from requesting the same
0472:                 * file lock.
0473:                 */
0474:                synchronized (deleteFileLock) {
0475:                    if (deleteProhibited) {
0476:                        return; /* deletion disabled. */
0477:                    }
0478:
0479:                    Set safeFiles = fileSelector.copySafeToDeleteFiles();
0480:                    if (safeFiles == null) {
0481:                        return; /* Nothing to do. */
0482:                    }
0483:
0484:                    /*
0485:                     * Fail loudly if the environment is invalid.  A
0486:                     * RunRecoveryException must have occurred.
0487:                     */
0488:                    env.checkIfInvalid();
0489:
0490:                    /*
0491:                     * Fail silently if the environment is not open.
0492:                     */
0493:                    if (env.mayNotWrite()) {
0494:                        return;
0495:                    }
0496:
0497:                    /*
0498:                     * If we can't get an exclusive lock, then there are reader
0499:                     * processes and we can't delete any cleaned files.
0500:                     */
0501:                    if (!env.getFileManager().lockEnvironment(false, true)) {
0502:                        Tracer
0503:                                .trace(
0504:                                        Level.SEVERE,
0505:                                        env,
0506:                                        "Cleaner has "
0507:                                                + safeFiles.size()
0508:                                                + " files not deleted because of read-only processes.");
0509:                        return;
0510:                    }
0511:
0512:                    try {
0513:                        for (Iterator i = safeFiles.iterator(); i.hasNext();) {
0514:                            Long fileNum = (Long) i.next();
0515:                            long fileNumValue = fileNum.longValue();
0516:                            boolean deleted = false;
0517:                            try {
0518:                                if (expunge) {
0519:                                    env.getFileManager().deleteFile(
0520:                                            fileNumValue);
0521:                                } else {
0522:                                    env.getFileManager().renameFile(
0523:                                            fileNumValue,
0524:                                            FileManager.DEL_SUFFIX);
0525:                                }
0526:                                deleted = true;
0527:                            } catch (DatabaseException e) {
0528:                                traceFileNotDeleted(e, fileNumValue);
0529:                            } catch (IOException e) {
0530:                                traceFileNotDeleted(e, fileNumValue);
0531:                            }
0532:
0533:                            /*
0534:                             * If the log file was not deleted, leave it in the
0535:                             * safe-to-delete set (and the UP) so that we will retry
0536:                             * the deletion later.  If the log file was deleted, trace
0537:                             * the deletion, delete the file from the UP and from the
0538:                             * safe-to-delete set.
0539:                             *
0540:                             * We do not retry if an error occurs deleting the UP
0541:                             * database entries below.  Retrying is intended only to
0542:                             * solve a problem on Windows where deleting a log file
0543:                             * isn't always possible immediately after closing it.
0544:                             */
0545:                            if (deleted) {
0546:                                Tracer
0547:                                        .trace(
0548:                                                Level.SEVERE,
0549:                                                env,
0550:                                                "Cleaner deleted file 0x"
0551:                                                        + Long
0552:                                                                .toHexString(fileNumValue));
0553:
0554:                                /*
0555:                                 * Remove the file from the profile before removing
0556:                                 * it from the safe-to-delete set.  If we remove in the
0557:                                 * reverse order, it may be selected for cleaning.
0558:                                 * Always delete the file from the safe-to-delete set
0559:                                 * (in a finally block) so that we don't attempt to
0560:                                 * delete the file again.
0561:                                 */
0562:                                try {
0563:                                    profile.removeFile(fileNum);
0564:                                } finally {
0565:                                    fileSelector.removeDeletedFile(fileNum);
0566:                                }
0567:                            }
0568:                            nCleanerDeletions++;
0569:                        }
0570:                    } finally {
0571:                        env.getFileManager().releaseExclusiveLock();
0572:                    }
0573:                }
0574:            }
0575:
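            /*
             * Illustrative counterpart to the deleteProhibited coordination
             * described above (an assumption about the caller, not code from
             * this file): a hot backup sets the prohibition before copying log
             * files and clears it when finished, typically through the
             * com.sleepycat.je.util.DbBackup helper.
             *
             *     DbBackup backup = new DbBackup(env);
             *     backup.startBackup();   // deletion is now prohibited
             *     try {
             *         String[] files = backup.getLogFilesInBackupSet();
             *         // copy the listed files to the backup location ...
             *     } finally {
             *         backup.endBackup();  // deletion is allowed again
             *     }
             */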
0576:            public void setDeleteProhibited() {
0577:
0578:                synchronized (deleteFileLock) {
0579:                    deleteProhibited = true;
0580:                }
0581:            }
0582:
0583:            public void clearDeleteProhibited() {
0584:                synchronized (deleteFileLock) {
0585:                    deleteProhibited = false;
0586:                }
0587:            }
0588:
0589:            private void traceFileNotDeleted(Exception e, long fileNum) {
0590:                Tracer
0591:                        .trace(
0592:                                env,
0593:                                "Cleaner",
0594:                                "deleteSafeToDeleteFiles",
0595:                                "Log file 0x"
0596:                                        + Long.toHexString(fileNum)
0597:                                        + " could not be "
0598:                                        + (expunge ? "deleted" : "renamed")
0599:                                        + ".  This operation will be retried at the next checkpoint.",
0600:                                e);
0601:            }
0602:
0603:            /**
0604:             * Returns a copy of the cleaned and processed files at the time a
0605:             * checkpoint starts.
0606:             *
0607:             * <p>If non-null is returned, the checkpoint should flush an extra level,
0608:             * and updateFilesAtCheckpointEnd() should be called when the checkpoint is
0609:             * complete.</p>
0610:             */
0611:            public CheckpointStartCleanerState getFilesAtCheckpointStart()
0612:                    throws DatabaseException {
0613:
0614:                /* Pending LNs can prevent file deletion. */
0615:                processPending();
0616:
0617:                return fileSelector.getFilesAtCheckpointStart();
0618:            }
0619:
0620:            /**
0621:             * When a checkpoint is complete, update the files that were returned at
0622:             * the beginning of the checkpoint.
0623:             */
0624:            public void updateFilesAtCheckpointEnd(
0625:                    CheckpointStartCleanerState info) throws DatabaseException {
0626:
0627:                fileSelector.updateFilesAtCheckpointEnd(info);
0628:                deleteSafeToDeleteFiles();
0629:            }
0630:
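            /*
             * Sketch of the calling sequence implied by the two methods above;
             * the checkpointer is the assumed caller, and this is not code
             * from this file.
             *
             *     CheckpointStartCleanerState info = cleaner.getFilesAtCheckpointStart();
             *     // ... write the checkpoint, flushing an extra level if files were cleaned ...
             *     cleaner.updateFilesAtCheckpointEnd(info); // may now delete safe-to-delete files
             */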
0631:            /**
0632:             * Update the lowUtilizationFiles and mustBeCleanedFiles fields with new
0633:             * read-only collections, and update the backlog file count.
0634:             */
0635:            public void updateReadOnlyFileCollections() {
0636:                mustBeCleanedFiles = fileSelector.getMustBeCleanedFiles();
0637:                lowUtilizationFiles = fileSelector.getLowUtilizationFiles();
0638:                nBacklogFiles = fileSelector.getBacklog();
0639:            }
0640:
0641:            /**
0642:             * If any LNs are pending, process them.  This method should be called
0643:             * often enough to prevent the pending LN set from growing too large.
0644:             */
0645:            void processPending() throws DatabaseException {
0646:
0647:                DbTree dbMapTree = env.getDbMapTree();
0648:
0649:                LNInfo[] pendingLNs = fileSelector.getPendingLNs();
0650:                if (pendingLNs != null) {
0651:                    TreeLocation location = new TreeLocation();
0652:
0653:                    for (int i = 0; i < pendingLNs.length; i += 1) {
0654:                        LNInfo info = pendingLNs[i];
0655:
0656:                        DatabaseId dbId = info.getDbId();
0657:                        DatabaseImpl db = dbMapTree.getDb(dbId, lockTimeout);
0658:                        try {
0659:                            byte[] key = info.getKey();
0660:                            byte[] dupKey = info.getDupKey();
0661:                            LN ln = info.getLN();
0662:
0663:                            /* Evict before processing each entry. */
0664:                            if (DO_CRITICAL_EVICTION) {
0665:                                env.getEvictor().doCriticalEviction(true); // backgroundIO
0666:                            }
0667:
0668:                            processPendingLN(ln, db, key, dupKey, location);
0669:                        } finally {
0670:                            dbMapTree.releaseDb(db);
0671:                        }
0672:
0673:                        /* Sleep if background read/write limit was exceeded. */
0674:                        env.sleepAfterBackgroundIO();
0675:                    }
0676:                }
0677:
0678:                DatabaseId[] pendingDBs = fileSelector.getPendingDBs();
0679:                if (pendingDBs != null) {
0680:                    for (int i = 0; i < pendingDBs.length; i += 1) {
0681:                        DatabaseId dbId = pendingDBs[i];
0682:                        DatabaseImpl db = dbMapTree.getDb(dbId, lockTimeout);
0683:                        try {
0684:                            if (db == null || db.isDeleteFinished()) {
0685:                                fileSelector.removePendingDB(dbId);
0686:                            }
0687:                        } finally {
0688:                            dbMapTree.releaseDb(db);
0689:                        }
0690:                    }
0691:                }
0692:            }
0693:
0694:            /**
0695:             * Processes a pending LN, getting the lock first to ensure that the
0696:             * overhead of retries is minimal.
0697:             */
0698:            private void processPendingLN(LN ln, DatabaseImpl db, byte[] key,
0699:                    byte[] dupKey, TreeLocation location)
0700:                    throws DatabaseException {
0701:
0702:                boolean parentFound = false; // We found the parent BIN.
0703:                boolean processedHere = true; // The LN was cleaned here.
0704:                boolean lockDenied = false; // The LN lock was denied.
0705:                boolean obsolete = false; // The LN is no longer in use.
0706:                boolean completed = false; // This method completed.
0707:
0708:                BasicLocker locker = null;
0709:                BIN bin = null;
0710:                DIN parentDIN = null;
0711:                try {
0712:                    nPendingLNsProcessed++;
0713:
0714:                    /*
0715:                     * If the DB is gone, this LN is obsolete.  If delete cleanup is in
0716:                     * progress, put the DB into the DB pending set; this LN will be
0717:                     * declared deleted after the delete cleanup is finished.
0718:                     */
0719:                    if (db == null || db.isDeleted()) {
0720:                        addPendingDB(db);
0721:                        nLNsDead++;
0722:                        obsolete = true;
0723:                        completed = true;
0724:                        return;
0725:                    }
0726:
0727:                    Tree tree = db.getTree();
0728:                    assert tree != null;
0729:
0730:                    /* Get a non-blocking lock on the original node ID. */
0731:
0732:                    locker = new BasicLocker(env);
0733:                    LockResult lockRet = locker.nonBlockingLock(ln.getNodeId(),
0734:                            LockType.READ, db);
0735:                    if (lockRet.getLockGrant() == LockGrantType.DENIED) {
0736:                        /* Try again later. */
0737:                        nPendingLNsLocked++;
0738:                        lockDenied = true;
0739:                        completed = true;
0740:                        return;
0741:                    }
0742:
0743:                    /*
0744:                     * Search down to the bottommost level for the parent of this LN.
0745:                     *
0746:                     * We pass searchDupTree=true to search the dup tree by nodeID if
0747:                     * necessary.  This handles the case where dupKey is null because
0748:                     * the pending entry was a deleted single-duplicate in a BIN.
0749:                     */
0750:                    parentFound = tree.getParentBINForChildLN(location, key,
0751:                            dupKey, ln, false, // splitsAllowed
0752:                            true, // findDeletedEntries
0753:                            true, // searchDupTree
0754:                            UPDATE_GENERATION);
0755:                    bin = location.bin;
0756:                    int index = location.index;
0757:
0758:                    if (!parentFound) {
0759:                        nLNsDead++;
0760:                        obsolete = true;
0761:                        completed = true;
0762:                        return;
0763:                    }
0764:
0765:                    if (ln.containsDuplicates()) {
0766:                        /* Migrate a DupCountLN. */
0767:                        parentDIN = (DIN) bin.fetchTarget(index);
0768:                        parentDIN.latch(UPDATE_GENERATION);
0769:                        ChildReference dclRef = parentDIN.getDupCountLNRef();
0770:                        processedHere = false;
0771:                        migrateDupCountLN(db, dclRef.getLsn(), parentDIN,
0772:                                dclRef, true, // wasCleaned
0773:                                true, // isPending
0774:                                ln.getNodeId(), // lockedPendingNodeId
0775:                                CLEAN_PENDING_LN);
0776:                    } else {
0777:                        /* Migrate a plain LN. */
0778:                        processedHere = false;
0779:                        migrateLN(db, bin.getLsn(index), bin, index, true, // wasCleaned
0780:                                true, // isPending
0781:                                ln.getNodeId(), // lockedPendingNodeId
0782:                                true, // backgroundIO
0783:                                CLEAN_PENDING_LN);
0784:                    }
0785:                    completed = true;
0786:                } catch (DatabaseException DBE) {
0787:                    DBE.printStackTrace();
0788:                    Tracer.trace(env, "com.sleepycat.je.cleaner.Cleaner",
0789:                            "processLN", "Exception thrown: ", DBE);
0790:                    throw DBE;
0791:                } finally {
0792:                    if (parentDIN != null) {
0793:                        parentDIN.releaseLatchIfOwner();
0794:                    }
0795:
0796:                    if (bin != null) {
0797:                        bin.releaseLatchIfOwner();
0798:                    }
0799:
0800:                    if (locker != null) {
0801:                        locker.operationEnd();
0802:                    }
0803:
0804:                    /*
0805:                     * If migrateLN was not called above, remove the pending LN and
0806:                     * perform tracing in this method.
0807:                     */
0808:                    if (processedHere) {
0809:                        if (completed && !lockDenied) {
0810:                            fileSelector.removePendingLN(ln.getNodeId());
0811:                        }
0812:                        trace(detailedTraceLevel, CLEAN_PENDING_LN, ln,
0813:                                DbLsn.NULL_LSN, completed, obsolete, false /*migrated*/);
0814:                    }
0815:                }
0816:            }
0817:
0818:            /**
0819:             * Returns whether the given BIN entry may be stripped by the evictor.
0820:             * True is always returned if the BIN is not dirty.  False is returned if
0821:             * the BIN is dirty and the entry will be migrated soon.
0822:             */
0823:            public boolean isEvictable(BIN bin, int index) {
0824:
0825:                if (bin.getDirty()) {
0826:
0827:                    if (bin.getMigrate(index)) {
0828:                        return false;
0829:                    }
0830:
0831:                    long lsn = bin.getLsn(index);
0832:                    if (lsn == DbLsn.NULL_LSN) {
0833:
0834:                        /*
0835:                         * LN is resident but never logged, no cleaning restrictions
0836:                         * apply.
0837:                         */
0838:                        return true;
0839:                    }
0840:
0841:                    boolean isResident = (bin.getTarget(index) != null);
0842:                    Long fileNum = new Long(DbLsn.getFileNumber(lsn));
0843:
0844:                    if ((PROACTIVE_MIGRATION || isResident)
0845:                            && mustBeCleanedFiles.contains(fileNum)) {
0846:                        return false;
0847:                    }
0848:
0849:                    if ((clusterAll || (clusterResident && isResident))
0850:                            && lowUtilizationFiles.contains(fileNum)) {
0851:                        return false;
0852:                    }
0853:                }
0854:
0855:                return true;
0856:            }
0857:
0858:            /**
0859:             * This method should be called just before logging a BIN.  LNs will be
0860:             * migrated if the MIGRATE flag is set, or if they are in a file to be
0861:             * cleaned, or if the LNs qualify according to the rules for cluster and
0862:             * clusterAll.
0863:             *
0864:             * <p>On return this method guarantees that no MIGRATE flag will be set on
0865:             * any child entry.  If this method is *not* called before logging a BIN,
0866:             * then the addPendingLNs method must be called.</p>
0867:             *
0868:             * @param bin is the latched BIN.  The latch will not be released by this
0869:             * method.
0870:             *
0871:             * @param proactiveMigration perform proactive migration if needed; this is
0872:             * false during a split, to reduce the delay in the user operation.
0873:             */
0874:            public void lazyMigrateLNs(final BIN bin,
0875:                    boolean proactiveMigration, boolean backgroundIO)
0876:                    throws DatabaseException {
0877:
0878:                DatabaseImpl db = bin.getDatabase();
0879:
0880:                boolean isBinInDupDb = db.getSortedDuplicates()
0881:                        && !bin.containsDuplicates();
0882:
0883:                /*
0884:                 * For non-resident LNs, sort them by LSN before migrating them.
0885:                 * Fetching in LSN order reduces physical disk I/O.
0886:                 */
0887:                Integer[] sortedIndices = null;
0888:                int nSortedIndices = 0;
0889:                int nEntries = bin.getNEntries();
0890:
0891:                for (int index = 0; index < nEntries; index += 1) {
0892:
0893:                    boolean migrateFlag = bin.getMigrate(index);
0894:                    boolean isResident = (bin.getTarget(index) != null);
0895:                    long childLsn = bin.getLsn(index);
0896:
0897:                    if (childLsn != DbLsn.NULL_LSN) {
0898:                        /* LSN could be NULL_LSN in deferred-write mode. */
0899:
0900:                        if (shouldMigrateLN(migrateFlag, isResident,
0901:                                proactiveMigration, isBinInDupDb, childLsn)) {
0902:
0903:                            if (isResident) {
0904:                                migrateLN(db, childLsn, bin, index,
0905:                                        migrateFlag, // wasCleaned
0906:                                        false, // isPending
0907:                                        0, // lockedPendingNodeId
0908:                                        backgroundIO, CLEAN_MIGRATE_LN);
0909:                            } else {
0910:                                if (sortedIndices == null) {
0911:                                    sortedIndices = new Integer[nEntries];
0912:                                }
0913:                                sortedIndices[nSortedIndices++] = new Integer(
0914:                                        index);
0915:                            }
0916:                        }
0917:                    }
0918:                }
0919:
0920:                if (sortedIndices != null) {
0921:                    Arrays.sort(sortedIndices, 0, nSortedIndices,
0922:                            new Comparator() {
0923:                                public int compare(Object o1, Object o2) {
0924:                                    int i1 = ((Integer) o1).intValue();
0925:                                    int i2 = ((Integer) o2).intValue();
0926:                                    return DbLsn.compareTo(bin.getLsn(i1), bin
0927:                                            .getLsn(i2));
0928:                                }
0929:                            });
0930:                    for (int i = 0; i < nSortedIndices; i += 1) {
0931:                        int index = sortedIndices[i].intValue();
0932:                        long childLsn = bin.getLsn(index);
0933:                        boolean migrateFlag = bin.getMigrate(index);
0934:                        migrateLN(db, childLsn, bin, index, migrateFlag, // wasCleaned
0935:                                false, // isPending
0936:                                0, // lockedPendingNodeId
0937:                                backgroundIO, CLEAN_MIGRATE_LN);
0938:                    }
0939:                }
0940:            }
0941:
0942:            /**
0943:             * This method should be called just before logging a root DIN.  The
0944:             * DupCountLN will be migrated if the MIGRATE flag is set, or if it is in a
0945:             * file to be cleaned, or if the LN qualifies according to the rules for
0946:             * cluster and clusterAll.
0947:             *
0948:             * <p>On return this method guarantees that the MIGRATE flag will not be
0949:             * set on the child entry.  If this method is *not* called before logging a
0950:             * root DIN, then the addPendingDupCountLN method must be called.</p>
0951:             *
0952:             * @param din is the latched DIN.  The latch will not be released by this
0953:             * method.
0954:             *
0955:             * @param dclRef is the reference to the DupCountLN.
0956:             *
0957:             * @param proactiveMigration perform proactive migration if needed; this is
0958:             * false during a split, to reduce the delay in the user operation.
0959:             */
0960:            public void lazyMigrateDupCountLN(DIN din, ChildReference dclRef,
0961:                    boolean proactiveMigration) throws DatabaseException {
0962:
0963:                DatabaseImpl db = din.getDatabase();
0964:
0965:                boolean migrateFlag = dclRef.getMigrate();
0966:                boolean isResident = (dclRef.getTarget() != null);
0967:                boolean isBinInDupDb = false;
0968:                long childLsn = dclRef.getLsn();
0969:
0970:                if (shouldMigrateLN(migrateFlag, isResident,
0971:                        proactiveMigration, isBinInDupDb, childLsn)) {
0972:
0973:                    migrateDupCountLN(db, childLsn, din, dclRef, migrateFlag, // wasCleaned
0974:                            false, // isPending
0975:                            0, // lockedPendingNodeId
0976:                            CLEAN_MIGRATE_LN);
0977:                }
0978:            }
0979:
0980:            /**
0981:             * Returns whether an LN entry should be migrated.  Updates stats.
0982:             *
0983:             * @param migrateFlag is whether the MIGRATE flag is set on the entry.
0984:             *
0985:             * @param isResident is whether the LN is currently resident.
0986:             *
0987:             * @param proactiveMigration perform proactive migration if needed; this is
0988:             * false during a split, to reduce the delay in the user operation.
0989:             *
0990:             * @param isBinInDupDb is whether this is a BIN entry in a database with
0991:             * duplicates enabled.
0992:             *
0993:             * @param childLsn is the LSN of the LN.
0994:             *
0995:             * @return whether to migrate the LN.
0996:             */
0997:            private boolean shouldMigrateLN(boolean migrateFlag,
0998:                    boolean isResident, boolean proactiveMigration,
0999:                    boolean isBinInDupDb, long childLsn) {
1000:                boolean doMigration = false;
1001:
1002:                if (migrateFlag) {
1003:
1004:                    /*
1005:                     * Always try to migrate if the MIGRATE flag is set, since the LN
1006:                     * has been processed.  If we did not migrate it, we would have to
1007:                     * add it to pending LN set.
1008:                     */
1009:                    doMigration = true;
1010:                    nMarkedLNsProcessed++;
1011:
1012:                } else if (!proactiveMigration || isBinInDupDb
1013:                        || env.isClosing()) {
1014:
1015:                    /*
1016:                     * Do nothing if proactiveMigration is false, since all further
1017:                     * migration is optional.
1018:                     *
1019:                     * Do nothing if this is a BIN in a duplicate database.  We
1020:                     * must not fetch DINs, since this BIN may be about to be
1021:                     * evicted.  Fetching a DIN would add it as an orphan to the
1022:                     * INList, plus an IN with non-LN children is not evictable.
1023:                     *
1024:                     * Do nothing if the environment is shutting down and the
1025:                     * MIGRATE flag is not set.  Proactive migration during
1026:                     * shutdown is counterproductive -- it prevents a short final
1027:                     * checkpoint, and it does not allow more files to be deleted.
1028:                     */
1029:
1030:                } else {
1031:
1032:                    Long fileNum = new Long(DbLsn.getFileNumber(childLsn));
1033:
1034:                    if ((PROACTIVE_MIGRATION || isResident)
1035:                            && mustBeCleanedFiles.contains(fileNum)) {
1036:
1037:                        /* Migrate because it will be cleaned soon. */
1038:                        doMigration = true;
1039:                        nToBeCleanedLNsProcessed++;
1040:
1041:                    } else if ((clusterAll || (clusterResident && isResident))
1042:                            && lowUtilizationFiles.contains(fileNum)) {
1043:
1044:                        /* Migrate for clustering. */
1045:                        doMigration = true;
1046:                        nClusterLNsProcessed++;
1047:                    }
1048:                }
1049:
1050:                return doMigration;
1051:            }
1052:
1053:            /**
1054:             * Migrate an LN in the given BIN entry, if it is not obsolete.  The BIN is
1055:             * latched on entry to this method and is left latched when it returns.
1056:             */
1057:            private void migrateLN(DatabaseImpl db, long lsn, BIN bin,
1058:                    int index, boolean wasCleaned, boolean isPending,
1059:                    long lockedPendingNodeId, boolean backgroundIO,
1060:                    String cleanAction) throws DatabaseException {
1061:
1062:                /* Status variables are used to generate debug tracing info. */
1063:                boolean obsolete = false; // The LN is no longer in use.
1064:                boolean migrated = false; // The LN was in use and is migrated.
1065:                boolean lockDenied = false; // The LN lock was denied.
1066:                boolean completed = false; // This method completed.
1067:                boolean clearTarget = false; // Node was non-resident when called.
1068:
1069:                /*
1070:                 * If wasCleaned is false we don't count statistics unless we migrate
1071:                 * the LN.  This avoids double counting.
1072:                 */
1073:                BasicLocker locker = null;
1074:                LN ln = null;
1075:
1076:                try {
1077:                    if (lsn == DbLsn.NULL_LSN) {
1078:                        /* This node was never written, no need to migrate. */
1079:                        completed = true;
1080:                        return;
1081:                    }
1082:
1083:                    /*
1084:                     * Fetch the node, if necessary.  If it was not resident and it is
1085:                     * an evictable LN, we will clear it after we migrate it.
1086:                     */
1087:                    if (!bin.isEntryKnownDeleted(index)) {
1088:                        ln = (LN) bin.getTarget(index);
1089:                        if (ln == null) {
1090:                            /* If fetchTarget returns null, a deleted LN was cleaned.*/
1091:                            ln = (LN) bin.fetchTarget(index);
1092:                            clearTarget = !db.getId().equals(DbTree.ID_DB_ID);
1093:                        }
1094:                    }
1095:
1096:                    /* Don't migrate knownDeleted or deleted cleaned LNs.  */
1097:                    if (ln == null) {
1098:                        if (wasCleaned) {
1099:                            nLNsDead++;
1100:                        }
1101:                        obsolete = true;
1102:                        completed = true;
1103:                        return;
1104:                    }
1105:
1106:                    /*
1107:                     * Get a non-blocking read lock on the LN.  A pending node is
1108:                     * already locked, but that node ID may be different from the
1109:                     * current LN's node ID if a slot is reused.  We must lock the current
1110:                     * node to guard against aborts.
1111:                     */
1112:                    if (lockedPendingNodeId != ln.getNodeId()) {
1113:                        locker = new BasicLocker(env);
1114:                        LockResult lockRet = locker.nonBlockingLock(ln
1115:                                .getNodeId(), LockType.READ, db);
1116:                        if (lockRet.getLockGrant() == LockGrantType.DENIED) {
1117:
1118:                            /*
1119:                             * LN is currently locked by another Locker, so we can't
1120:                             * assume anything about the value of the LSN in the bin.
1121:                             */
1122:                            if (wasCleaned) {
1123:                                nLNsLocked++;
1124:                            }
1125:                            lockDenied = true;
1126:                            completed = true;
1127:                            return;
1128:                        }
1129:                    }
1130:
1131:                    /* Don't migrate deleted LNs.  */
1132:                    if (ln.isDeleted()) {
1133:                        bin.setKnownDeletedLeaveTarget(index);
1134:                        if (wasCleaned) {
1135:                            nLNsDead++;
1136:                        }
1137:                        obsolete = true;
1138:                        completed = true;
1139:                        return;
1140:                    }
1141:
1142:                    /*
1143:                     * Once we have a lock, check whether the current LSN needs to be
1144:                     * migrated.  There is no need to migrate it if the LSN no longer
1145:                     * qualifies for cleaning.  The LSN could have been changed by an
1146:                     * update or delete after we set the MIGRATE flag.
1147:                     *
1148:                     * Note that we do not perform this optimization if the MIGRATE
1149:                     * flag is not set, i.e., for clustering and proactive migration of
1150:                     * resident LNs.  For these cases, we checked the conditions for
1151:                     * migration immediately before calling this method.  Although the
1152:                     * condition could change after locking, the window is small and
1153:                     * a second check is not worthwhile.
1154:                     */
1155:                    if (bin.getMigrate(index)) {
1156:                        Long fileNum = new Long(DbLsn.getFileNumber(lsn));
1157:                        if (!fileSelector.isFileCleaningInProgress(fileNum)) {
1158:                            obsolete = true;
1159:                            completed = true;
1160:                            if (wasCleaned) {
1161:                                nLNsDead++;
1162:                            }
1163:                            return;
1164:                        }
1165:                    }
1166:
1167:                    /* Migrate the LN. */
1168:                    byte[] key = getLNMainKey(bin, index);
1169:                    long newLNLsn = ln.logUpdateMemUsage(db, key, lsn, locker,
1170:                            bin, backgroundIO);
1171:                    bin.updateEntry(index, newLNLsn);
1172:                    nLNsMigrated++;
1173:                    migrated = true;
1174:                    completed = true;
1175:                    return;
1176:                } finally {
1177:                    if (isPending) {
1178:                        if (completed && !lockDenied) {
1179:                            fileSelector.removePendingLN(lockedPendingNodeId);
1180:                        }
1181:                    } else {
1182:
1183:                        /*
1184:                         * If a to-be-migrated LN was not processed successfully, we
1185:                         * must guarantee that the file will not be deleted and that we
1186:                         * will retry the LN later.  The retry information must be
1187:                         * complete or we may delete a file later without processing
1188:                         * all of its LNs.
1189:                         */
1190:                        if (bin.getMigrate(index) && (!completed || lockDenied)) {
1191:
1192:                            byte[] key = getLNMainKey(bin, index);
1193:                            byte[] dupKey = getLNDupKey(bin, index, ln);
1194:                            fileSelector.addPendingLN(ln, db.getId(), key,
1195:                                    dupKey);
1196:
1197:                            /* Wake up the cleaner thread to process pending LNs. */
1198:                            if (!areThreadsRunning()) {
1199:                                env.getUtilizationTracker().activateCleaner();
1200:                            }
1201:
1202:                            /*
1203:                             * If we need to retry, don't clear the target since we
1204:                             * would only have to fetch it again soon.
1205:                             */
1206:                            clearTarget = false;
1207:                        }
1208:                    }
1209:
1210:                    /*
1211:                     * Always clear the migrate flag.  If the LN could not be locked
1212:                     * and the migrate flag was set, the LN will have been added to the
1213:                     * pending LN set above.
1214:                     */
1215:                    bin.setMigrate(index, false);
1216:
1217:                    /*
1218:                     * If the node was originally non-resident, clear it now so that
1219:                     * we neither create more work for the evictor nor reduce the
1220:                     * cache memory available to the application.
1221:                     */
1222:                    if (clearTarget) {
1223:                        bin.updateEntry(index, null);
1224:                    }
1225:
1226:                    if (locker != null) {
1227:                        locker.operationEnd();
1228:                    }
1229:
1230:                    trace(detailedTraceLevel, cleanAction, ln, lsn, completed,
1231:                            obsolete, migrated);
1232:                }
1233:            }
1234:
1235:            /**
1236:             * Migrate the DupCountLN for the given DIN.  The DIN is latched on entry
1237:             * to this method and is left latched when it returns.
1238:             */
1239:            private void migrateDupCountLN(DatabaseImpl db, long lsn,
1240:                    DIN parentDIN, ChildReference dclRef, boolean wasCleaned,
1241:                    boolean isPending, long lockedPendingNodeId,
1242:                    String cleanAction) throws DatabaseException {
1243:
1244:                /* Status variables are used to generate debug tracing info. */
1245:                boolean obsolete = false; // The LN is no longer in use.
1246:                boolean migrated = false; // The LN was in use and is migrated.
1247:                boolean lockDenied = false; // The LN lock was denied.
1248:                boolean completed = false; // This method completed.
1249:                boolean clearTarget = false; // Node was non-resident when called.
1250:
1251:                /*
1252:                 * If wasCleaned is false we don't count statistics unless we migrate
1253:                 * the LN.  This avoids double counting.
1254:                 */
1255:                BasicLocker locker = null;
1256:                LN ln = null;
1257:
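                /*
                 * Note: a DupCountLN holds the number of LNs in the duplicate
                 * subtree rooted at the DIN, and is referenced by the DIN's
                 * DupCountLN ChildReference rather than by a BIN slot, which is
                 * why this method updates the DIN instead of a BIN entry.
                 */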
1258:                try {
1259:                    if (lsn == DbLsn.NULL_LSN) {
1260:                        /* This node was never written, no need to migrate. */
1261:                        completed = true;
1262:                        return;
1263:                    }
1264:
1265:                    /*
1266:                     * Fetch the node, if necessary.  If it was not resident and it is
1267:                     * an evictable LN, we will clear it after we migrate it.
1268:                     */
1269:                    ln = (LN) dclRef.getTarget();
1270:                    if (ln == null) {
1271:                        ln = (LN) dclRef.fetchTarget(db, parentDIN);
1272:                        assert ln != null;
1273:                        clearTarget = !db.getId().equals(DbTree.ID_DB_ID);
1274:                    }
1275:
1276:                    /*
1277:                     * Get a non-blocking read lock on the LN, if this is not an
1278:                     * already locked pending node.
1279:                     */
1280:                    if (lockedPendingNodeId != ln.getNodeId()) {
1281:                        locker = new BasicLocker(env);
1282:                        LockResult lockRet = locker.nonBlockingLock(ln
1283:                                .getNodeId(), LockType.READ, db);
1284:                        if (lockRet.getLockGrant() == LockGrantType.DENIED) {
1285:
1286:                            /*
1287:                             * LN is currently locked by another Locker, so we can't
1288:                             * assume anything about the value of the LSN in the bin.
1289:                             */
1290:                            if (wasCleaned) {
1291:                                nLNsLocked++;
1292:                            }
1293:                            lockDenied = true;
1294:                            completed = true;
1295:                            return;
1296:                        }
1297:                    }
1298:
1299:                    /*
1300:                     * Once we have a lock, check whether the current LSN needs to be
1301:                     * migrated.  There is no need to migrate it if the LSN no longer
1302:                     * qualifies for cleaning.
1303:                     */
1304:                    Long fileNum = new Long(DbLsn.getFileNumber(lsn));
1305:                    if (!fileSelector.isFileCleaningInProgress(fileNum)) {
1306:                        obsolete = true;
1307:                        completed = true;
1308:                        if (wasCleaned) {
1309:                            nLNsDead++;
1310:                        }
1311:                        return;
1312:                    }
1313:
1314:                    /* Migrate the LN. */
1315:                    byte[] key = parentDIN.getDupKey();
1316:                    long newLNLsn = ln.logUpdateMemUsage(db, key, lsn, locker,
1317:                            parentDIN);
1318:                    parentDIN.updateDupCountLNRef(newLNLsn);
1319:                    nLNsMigrated++;
1320:                    migrated = true;
1321:                    completed = true;
1322:                    return;
1323:                } finally {
1324:                    if (isPending) {
1325:                        if (completed && !lockDenied) {
1326:                            fileSelector.removePendingLN(lockedPendingNodeId);
1327:                        }
1328:                    } else {
1329:
1330:                        /*
1331:                         * If a to-be-migrated LN was not processed successfully, we
1332:                         * must guarantee that the file will not be deleted and that we
1333:                         * will retry the LN later.  The retry information must be
1334:                         * complete or we may delete a file later without processing
1335:                         * all of its LNs.
1336:                         */
1337:                        if (dclRef.getMigrate() && (!completed || lockDenied)) {
1338:
1339:                            byte[] key = parentDIN.getDupKey();
1340:                            byte[] dupKey = null;
1341:                            fileSelector.addPendingLN(ln, db.getId(), key,
1342:                                    dupKey);
1343:
1344:                            /* Wake up the cleaner thread to process pending LNs. */
1345:                            if (!areThreadsRunning()) {
1346:                                env.getUtilizationTracker().activateCleaner();
1347:                            }
1348:
1349:                            /*
1350:                             * If we need to retry, don't clear the target since we
1351:                             * would only have to fetch it again soon.
1352:                             */
1353:                            clearTarget = false;
1354:                        }
1355:                    }
1356:
1357:                    /*
1358:                     * Always clear the migrate flag.  If the LN could not be locked
1359:                     * and the migrate flag was set, the LN will have been added to the
1360:                     * pending LN set above.
1361:                     */
1362:                    dclRef.setMigrate(false);
1363:
1364:                    /*
1365:                     * If the node was originally non-resident, clear it now so that
1366:                     * we neither create more work for the evictor nor reduce the
1367:                     * cache memory available to the application.
1368:                     */
1369:                    if (clearTarget) {
1370:                        parentDIN.updateDupCountLN(null);
1371:                    }
1372:
1373:                    if (locker != null) {
1374:                        locker.operationEnd();
1375:                    }
1376:
1377:                    trace(detailedTraceLevel, cleanAction, ln, lsn, completed,
1378:                            obsolete, migrated);
1379:                }
1380:            }
1381:
1382:            /**
1383:             * Returns the main key for a given BIN entry.
1384:             */
1385:            private byte[] getLNMainKey(BIN bin, int index)
1386:                    throws DatabaseException {
1387:
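                /*
                 * For a DBIN (duplicates), the dup key is the main database key
                 * shared by the whole duplicate set; for a plain BIN the slot
                 * key itself is the main key.
                 */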
1388:                if (bin.containsDuplicates()) {
1389:                    return bin.getDupKey();
1390:                } else {
1391:                    return bin.getKey(index);
1392:                }
1393:            }
1394:
1395:            /**
1396:             * Returns the duplicate key for a given BIN entry.
1397:             */
1398:            private byte[] getLNDupKey(BIN bin, int index, LN ln)
1399:                    throws DatabaseException {
1400:
1401:                DatabaseImpl db = bin.getDatabase();
1402:
1403:                if (!db.getSortedDuplicates() || ln.containsDuplicates()) {
1404:
1405:                    /*
1406:                     * The dup key is not needed for a non-duplicate DB or for a
1407:                     * DupCountLN.
1408:                     */
1409:                    return null;
1410:
1411:                } else if (bin.containsDuplicates()) {
1412:
1413:                    /* The DBIN entry key is the dup key. */
1414:                    return bin.getKey(index);
1415:
1416:                } else {
1417:
1418:                    /*
1419:                     * The data is the dup key if the LN is not deleted.  If the LN is
1420:                     * deleted, this method will return null and we will do a node ID
1421:                     * search later when processing the pending LN.
1422:                     */
1423:                    return ln.getData();
1424:                }
1425:            }
1426:
1427:            /**
1428:             * Adds the DB ID to the pending DB set if it is being deleted but deletion
1429:             * is not yet complete.
1430:             */
1431:            void addPendingDB(DatabaseImpl db) {
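                /*
                 * Tracking the DB as pending keeps cleaned files from being
                 * deleted until the in-progress database removal has finished.
                 */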
1432:                if (db != null && db.isDeleted() && !db.isDeleteFinished()) {
1433:                    DatabaseId id = db.getId();
1434:                    if (fileSelector.addPendingDB(id)) {
1435:                        Tracer.trace(detailedTraceLevel, env,
1436:                                "CleanAddPendingDB " + id);
1437:                    }
1438:                }
1439:            }
1440:
1441:            /**
1442:             * Send trace messages to the java.util.logging logger.  Don't rely on
1443:             * the logger alone to decide whether to send this message; we don't even
1444:             * want to construct the message unless the level is enabled.
1445:             */
1446:            void trace(Level level, String action, Node node, long logLsn,
1447:                    boolean completed, boolean obsolete, boolean dirtiedMigrated) {
1448:
1449:                Logger logger = env.getLogger();
1450:                if (logger.isLoggable(level)) {
1451:                    StringBuffer sb = new StringBuffer();
1452:                    sb.append(action);
1453:                    if (node != null) {
1454:                        sb.append(" node=");
1455:                        sb.append(node.getNodeId());
1456:                    }
1457:                    sb.append(" logLsn=");
1458:                    sb.append(DbLsn.getNoFormatString(logLsn));
1459:                    sb.append(" complete=").append(completed);
1460:                    sb.append(" obsolete=").append(obsolete);
1461:                    sb.append(" dirtiedOrMigrated=").append(dirtiedMigrated);
1462:
1463:                    logger.log(level, sb.toString());
1464:                }
1465:            }
1466:        }