Source Code Cross Referenced for CreateIndexConstantAction.java in db-derby-10.2 (org.apache.derby.impl.sql.execute)



001:        /*
002:
003:           Derby - Class org.apache.derby.impl.sql.execute.CreateIndexConstantAction
004:
005:           Licensed to the Apache Software Foundation (ASF) under one or more
006:           contributor license agreements.  See the NOTICE file distributed with
007:           this work for additional information regarding copyright ownership.
008:           The ASF licenses this file to you under the Apache License, Version 2.0
009:           (the "License"); you may not use this file except in compliance with
010:           the License.  You may obtain a copy of the License at
011:
012:              http://www.apache.org/licenses/LICENSE-2.0
013:
014:           Unless required by applicable law or agreed to in writing, software
015:           distributed under the License is distributed on an "AS IS" BASIS,
016:           WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
017:           See the License for the specific language governing permissions and
018:           limitations under the License.
019:
020:         */
021:
022:        package org.apache.derby.impl.sql.execute;
023:
024:        import org.apache.derby.iapi.services.sanity.SanityManager;
025:
026:        import org.apache.derby.iapi.services.loader.ClassFactory;
027:        import org.apache.derby.iapi.services.loader.ClassInspector;
028:
029:        import org.apache.derby.iapi.services.stream.HeaderPrintWriter;
030:
031:        import org.apache.derby.iapi.sql.execute.ConstantAction;
032:        import org.apache.derby.iapi.sql.execute.ExecutionContext;
033:        import org.apache.derby.iapi.sql.execute.ExecRow;
034:        import org.apache.derby.iapi.sql.execute.ExecIndexRow;
035:
036:        import org.apache.derby.iapi.sql.dictionary.ColumnDescriptor;
037:        import org.apache.derby.iapi.sql.dictionary.ColumnDescriptorList;
038:        import org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptorList;
039:        import org.apache.derby.iapi.sql.dictionary.DataDescriptorGenerator;
040:        import org.apache.derby.iapi.sql.dictionary.DataDictionaryContext;
041:        import org.apache.derby.iapi.sql.dictionary.DataDictionary;
042:        import org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor;
043:        import org.apache.derby.iapi.sql.dictionary.ColumnDescriptor;
044:        import org.apache.derby.iapi.sql.dictionary.SchemaDescriptor;
045:        import org.apache.derby.iapi.sql.dictionary.IndexRowGenerator;
046:        import org.apache.derby.iapi.sql.dictionary.TableDescriptor;
047:        import org.apache.derby.iapi.sql.dictionary.ConstraintDescriptor;
048:        import org.apache.derby.iapi.sql.dictionary.StatisticsDescriptor;
049:        import org.apache.derby.iapi.sql.depend.DependencyManager;
050:        import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;
051:        import org.apache.derby.iapi.sql.Activation;
052:
053:        import org.apache.derby.iapi.types.DataValueFactory;
054:        import org.apache.derby.iapi.types.DataTypeDescriptor;
055:        import org.apache.derby.iapi.types.TypeId;
056:        import org.apache.derby.iapi.types.RowLocation;
057:
058:        import org.apache.derby.iapi.reference.SQLState;
059:
060:        import org.apache.derby.iapi.error.StandardException;
061:
062:        import org.apache.derby.iapi.store.access.ColumnOrdering;
063:        import org.apache.derby.iapi.store.access.ConglomerateController;
064:        import org.apache.derby.iapi.store.access.GroupFetchScanController;
065:        import org.apache.derby.iapi.store.access.RowLocationRetRowSource;
066:        import org.apache.derby.iapi.store.access.ScanController;
067:        import org.apache.derby.iapi.store.access.SortObserver;
068:        import org.apache.derby.iapi.store.access.SortController;
069:        import org.apache.derby.iapi.store.access.TransactionController;
070:        import org.apache.derby.iapi.types.DataValueDescriptor;
071:
072:        import org.apache.derby.catalog.UUID;
073:        import org.apache.derby.catalog.types.StatisticsImpl;
074:
075:        import java.util.Properties;
076:        import org.apache.derby.iapi.services.io.FormatableBitSet;
077:
078:        /**
079:         *	This class describes actions that are ALWAYS performed for a
080:         *	CREATE INDEX Statement at Execution time.
081:         *
082:         *	@author Jeff Lichtman	Cribbed from CreateTableConstantAction
083:         */
084:
085:        class CreateIndexConstantAction extends IndexConstantAction {
086:
087:            private boolean unique;
088:            private String indexType;
089:            private long conglomId;
090:            private String[] columnNames;
091:            private boolean[] isAscending;
092:            private boolean isConstraint;
093:            private UUID conglomerateUUID;
094:            private Properties properties;
095:
096:            private ExecRow indexTemplateRow;
097:
098:            // CONSTRUCTORS
099:            /**
100:             *	Make the ConstantAction for a CREATE INDEX statement.
101:             *
102:             *  @param unique		True means it will be a unique index
103:             *  @param indexType	The type of index (BTREE, for example)
104:             *  @param schemaName	the schema that table (and index) lives in.
105:             *  @param indexName	Name of the index
106:             *  @param tableName	Name of table the index will be on
107:             *  @param tableId		UUID of table
108:             *  @param conglomId	Conglomerate ID of the index, if known in advance
109:             *  @param columnNames	Names of the columns in the index, in order
110:             *	@param isAscending	Array of booleans telling asc/desc on each column
111:             *  @param isConstraint	TRUE if index is backing up a constraint, else FALSE
112:             *  @param conglomerateUUID	ID of conglomerate
113:             *  @param properties	The optional properties list associated with the index.
114:             */
115:            CreateIndexConstantAction(boolean unique, String indexType,
116:                    String schemaName, String indexName, String tableName,
117:                    UUID tableId, long conglomId, String[] columnNames,
118:                    boolean[] isAscending, boolean isConstraint,
119:                    UUID conglomerateUUID, Properties properties) {
120:                super (tableId, indexName, tableName, schemaName);
121:                this .unique = unique;
122:                this .indexType = indexType;
123:                this .conglomId = conglomId;
124:                this .columnNames = columnNames;
125:                this .isAscending = isAscending;
126:                this .isConstraint = isConstraint;
127:                this .conglomerateUUID = conglomerateUUID;
128:                this .properties = properties;
129:            }
130:
131:            ///////////////////////////////////////////////
132:            //
133:            // OBJECT SHADOWS
134:            //
135:            ///////////////////////////////////////////////
136:
137:            public String toString() {
138:                // Do not put this under SanityManager.DEBUG - it is needed for
139:                // error reporting.
140:                return "CREATE INDEX " + indexName;
141:            }
142:
143:            // INTERFACE METHODS
144:
145:            /**
146:             *	This is the guts of the Execution-time logic for CREATE INDEX.
147:             *
148:             *	@see ConstantAction#executeConstantAction
149:             *
150:             * @exception StandardException		Thrown on failure
151:             */
152:            public void executeConstantAction(Activation activation)
153:                    throws StandardException {
154:                boolean forCreateTable;
155:                TableDescriptor td;
156:                UUID toid;
157:                ColumnDescriptor columnDescriptor;
158:                int[] baseColumnPositions;
159:                IndexRowGenerator indexRowGenerator = null;
160:                ExecRow[] baseRows;
161:                ExecIndexRow[] indexRows;
162:                ExecRow[] compactBaseRows;
163:                GroupFetchScanController scan;
164:                RowLocationRetRowSource rowSource;
165:                long sortId;
166:                int maxBaseColumnPosition = -1;
167:
168:                LanguageConnectionContext lcc = activation
169:                        .getLanguageConnectionContext();
170:                DataDictionary dd = lcc.getDataDictionary();
171:                DependencyManager dm = dd.getDependencyManager();
172:                TransactionController tc = lcc.getTransactionExecute();
173:
174:                /* Remember whether or not we are doing a create table */
175:                forCreateTable = activation.getForCreateTable();
176:
177:                /*
178:                 ** Inform the data dictionary that we are about to write to it.
179:                 ** There are several calls to data dictionary "get" methods here
180:                 ** that might be done in "read" mode in the data dictionary, but
181:                 ** it seemed safer to do this whole operation in "write" mode.
182:                 **
183:                 ** We tell the data dictionary we're done writing at the end of
184:                 ** the transaction.
185:                 */
186:                dd.startWriting(lcc);
187:
188:                /*
189:                 ** If the schema descriptor is null, then
190:                 ** we must have just read ourselves in.  
191:                 ** So we will get the corresponding schema
192:                 ** descriptor from the data dictionary.
193:                 */
194:                SchemaDescriptor sd = dd.getSchemaDescriptor(schemaName, tc,
195:                        true);
196:
197:                /* Get the table descriptor. */
198:                /* See if we can get the TableDescriptor 
199:                 * from the Activation.  (Will be there
200:                 * for backing indexes.)
201:                 */
202:                td = activation.getDDLTableDescriptor();
203:
204:                if (td == null) {
205:                    /* tableId will be non-null if adding an index to
206:                     * an existing table (as opposed to creating a
207:                     * table with a constraint with a backing index).
208:                     */
209:                    if (tableId != null) {
210:                        td = dd.getTableDescriptor(tableId);
211:                    } else {
212:                        td = dd.getTableDescriptor(tableName, sd);
213:                    }
214:                }
215:
216:                if (td == null) {
217:                    throw StandardException.newException(
218:                            SQLState.LANG_CREATE_INDEX_NO_TABLE, indexName,
219:                            tableName);
220:                }
221:
222:                if (td.getTableType() == TableDescriptor.SYSTEM_TABLE_TYPE) {
223:                    throw StandardException.newException(
224:                            SQLState.LANG_CREATE_SYSTEM_INDEX_ATTEMPTED,
225:                            indexName, tableName);
226:                }
227:
228:                /* Get a shared table lock on the table. We need to lock the table before
229:                 * invalidating dependents; otherwise, we may interfere with the
230:                 * compilation/re-compilation of DML/DDL.  See beetle 4325 and $WS/
231:                 * docs/language/SolutionsToConcurrencyIssues.txt (point f).
232:                 */
233:                lockTableForDDL(tc, td.getHeapConglomerateId(), false);
234:
235:                // invalidate any prepared statements that
236:                // depended on this table (including this one)
237:                if (!forCreateTable) {
238:                    dm.invalidateFor(td, DependencyManager.CREATE_INDEX, lcc);
239:                }
240:
241:                // Translate the base column names to column positions
242:                baseColumnPositions = new int[columnNames.length];
243:                for (int i = 0; i < columnNames.length; i++) {
244:                    // Look up the column in the data dictionary
245:                    columnDescriptor = td.getColumnDescriptor(columnNames[i]);
246:                    if (columnDescriptor == null) {
247:                        throw StandardException.newException(
248:                                SQLState.LANG_COLUMN_NOT_FOUND_IN_TABLE,
249:                                columnNames[i], tableName);
250:                    }
251:
252:                    TypeId typeId = columnDescriptor.getType().getTypeId();
253:
254:                    // Don't allow an index to be created on a column of a non-orderable type
255:                    ClassFactory cf = lcc.getLanguageConnectionFactory()
256:                            .getClassFactory();
257:                    boolean isIndexable = typeId.orderable(cf);
258:
259:                    if (isIndexable && typeId.userType()) {
260:                        String userClass = typeId
261:                                .getCorrespondingJavaTypeName();
262:
263:                        // Don't allow indexes to be created on classes that
264:                        // are loaded from the database. This is because recovery
265:                        // won't be able to see the class and it will need it to
266:                        // run the compare method.
267:                        try {
268:                            if (cf.isApplicationClass(cf
269:                                    .loadApplicationClass(userClass)))
270:                                isIndexable = false;
271:                        } catch (ClassNotFoundException cnfe) {
272:                            // shouldn't happen, as we just checked that the class is orderable
273:                            isIndexable = false;
274:                        }
275:                    }
276:
277:                    if (!isIndexable) {
278:                        throw StandardException
279:                                .newException(
280:                                        SQLState.LANG_COLUMN_NOT_ORDERABLE_DURING_EXECUTION,
281:                                        typeId.getSQLTypeName());
282:                    }
283:
284:                    // Remember the position in the base table of each column
285:                    baseColumnPositions[i] = columnDescriptor.getPosition();
286:
287:                    if (maxBaseColumnPosition < baseColumnPositions[i])
288:                        maxBaseColumnPosition = baseColumnPositions[i];
289:                }
290:
291:                // check if we have similar indices already for this table
292:                ConglomerateDescriptor[] congDescs = td
293:                        .getConglomerateDescriptors();
294:                boolean duplicate = false;
295:
296:                for (int i = 0; i < congDescs.length; i++) {
297:                    ConglomerateDescriptor cd = congDescs[i];
298:                    if (!cd.isIndex())
299:                        continue;
300:                    IndexRowGenerator irg = cd.getIndexDescriptor();
301:                    int[] bcps = irg.baseColumnPositions();
302:                    boolean[] ia = irg.isAscending();
303:                    int j = 0;
304:
305:                    /* For an index to be considered a duplicate of an already existing index, the
306:                     * following conditions have to be satisfied:
307:                     * 1. the set of columns (both key and include columns) and their 
308:                     *  order in the index is the same as that of an existing index AND 
309:                     * 2. the ordering attributes are the same AND 
310:                     * 3. both the previously existing index and the one being created 
311:                     *  are non-unique OR the previously existing index is unique
312:                     */
313:
314:                    if ((bcps.length == baseColumnPositions.length)
315:                            && (irg.isUnique() || !unique)
316:                            && indexType.equals(irg.indexType())) {
317:                        for (; j < bcps.length; j++) {
318:                            if ((bcps[j] != baseColumnPositions[j])
319:                                    || (ia[j] != isAscending[j]))
320:                                break;
321:                        }
322:                    }
323:
324:                    if (j == baseColumnPositions.length) // duplicate
325:                    {
326:                        /*
327:                         * Don't allow users to create a duplicate index. Allow it if it is
328:                         * being done internally for a constraint.
329:                         */
330:                        if (!isConstraint) {
331:                            activation.addWarning(StandardException.newWarning(
332:                                    SQLState.LANG_INDEX_DUPLICATE, cd
333:                                            .getConglomerateName()));
334:
335:                            return;
336:                        }
337:
338:                        //Duplicate indexes share the physical conglomerate underneath
339:                        conglomId = cd.getConglomerateNumber();
340:                        indexRowGenerator = cd.getIndexDescriptor();
341:                        //DERBY-655 and DERBY-1343  
342:                        //Duplicate indexes will have unique logical conglomerate UUIDs.  
343:                        conglomerateUUID = dd.getUUIDFactory().createUUID();
344:                        duplicate = true;
345:                        break;
346:                    }
347:                }
348:
349:                /* If an essentially identical index already exists, we share the
350:                 * conglomerate with the old one and simply add a descriptor
351:                 * entry into SYSCONGLOMERATES.
352:                 */
353:                DataDescriptorGenerator ddg = dd.getDataDescriptorGenerator();
354:                if (duplicate) {
355:                    ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(
356:                            conglomId, indexName, true, indexRowGenerator,
357:                            isConstraint, conglomerateUUID, td.getUUID(), sd
358:                                    .getUUID());
359:                    dd.addDescriptor(cgd, sd,
360:                            DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false,
361:                            tc);
362:                    // add newly added conglomerate to the list of conglomerate 
363:                    // descriptors in the td.
364:                    ConglomerateDescriptorList cdl = td
365:                            .getConglomerateDescriptorList();
366:                    cdl.add(cgd);
367:
368:                    // can't just return yet, need to get member "indexTemplateRow"
369:                    // because create constraint may use it
370:                }
371:
372:                // Describe the properties of the index to the store using Properties
373:                // RESOLVE: The following properties assume a BTREE index.
374:                Properties indexProperties;
375:
376:                if (properties != null) {
377:                    indexProperties = properties;
378:                } else {
379:                    indexProperties = new Properties();
380:                }
381:
382:                // Tell it the conglomerate id of the base table
383:                indexProperties.put("baseConglomerateId", Long.toString(td
384:                        .getHeapConglomerateId()));
385:
386:                // All indexes are unique because they contain the RowLocation.
387:                // The number of uniqueness columns must include the RowLocation
388:                // if the user did not specify a unique index.
389:                indexProperties.put("nUniqueColumns", Integer
390:                        .toString(unique ? baseColumnPositions.length
391:                                : baseColumnPositions.length + 1));
392:
393:                // By convention, the row location column is the last column
394:                indexProperties.put("rowLocationColumn", Integer
395:                        .toString(baseColumnPositions.length));
396:
397:                // For now, all columns are key fields, including the RowLocation
398:                indexProperties.put("nKeyFields", Integer
399:                        .toString(baseColumnPositions.length + 1));
400:
401:                // For now, assume that all index columns are ordered columns
402:                if (!duplicate) {
403:                    indexRowGenerator = new IndexRowGenerator(indexType,
404:                            unique, baseColumnPositions, isAscending,
405:                            baseColumnPositions.length);
406:                }
407:
408:                /* Now add the rows from the base table to the conglomerate.
409:                 * We do this by scanning the base table and inserting the
410:                 * rows into a sorter before inserting from the sorter
411:                 * into the index.  This gives us better performance
412:                 * and a more compact index.
413:                 */
414:
415:                rowSource = null;
416:                sortId = 0;
417:                boolean needToDropSort = false; // set to true once the sorter is created
418:
419:                /* bulkFetchSize will be 16 (for now) unless
420:                 * we are creating the table, in which case it
421:                 * will be 1.  It is too hard to remove the scan when
422:                 * creating an index on a new table, so minimize
423:                 * the work where we can.
424:                 */
425:                int bulkFetchSize = (forCreateTable) ? 1 : 16;
426:                int numColumns = td.getNumberOfColumns();
427:                int approximateRowSize = 0;
428:
429:                // Create the FormatableBitSet for mapping the partial to full base row
430:                FormatableBitSet bitSet = new FormatableBitSet(numColumns + 1);
431:                for (int index = 0; index < baseColumnPositions.length; index++) {
432:                    bitSet.set(baseColumnPositions[index]);
433:                }
434:                FormatableBitSet zeroBasedBitSet = RowUtil.shift(bitSet, 1);
435:
436:                // Start by opening a full scan on the base table.
437:                scan = tc.openGroupFetchScan(
438:                        td.getHeapConglomerateId(),
439:                        false, // hold
440:                        0, // open base table read only
441:                        TransactionController.MODE_TABLE,
442:                        TransactionController.ISOLATION_SERIALIZABLE,
443:                        zeroBasedBitSet, // all fields as objects
444:                        (DataValueDescriptor[]) null, // startKeyValue
445:                        0, // not used when giving null start posn.
446:                        null, // qualifier
447:                        (DataValueDescriptor[]) null, // stopKeyValue
448:                        0); // not used when giving null stop posn.
449:
450:                // Create arrays to hold the base row, index row, and compact base row templates
451:                baseRows = new ExecRow[bulkFetchSize];
452:                indexRows = new ExecIndexRow[bulkFetchSize];
453:                compactBaseRows = new ExecRow[bulkFetchSize];
454:
455:                try {
456:                    // Fill in the arrays of row templates
457:                    for (int i = 0; i < bulkFetchSize; i++) {
458:                        // create a base row template
459:                        baseRows[i] = activation.getExecutionFactory()
460:                                .getValueRow(maxBaseColumnPosition);
461:
462:                        // create an index row template
463:                        indexRows[i] = indexRowGenerator.getIndexRowTemplate();
464:
465:                        // create a compact base row template
466:                        compactBaseRows[i] = activation.getExecutionFactory()
467:                                .getValueRow(baseColumnPositions.length);
468:                    }
469:
470:                    indexTemplateRow = indexRows[0];
471:
472:                    // Fill the partial row with nulls of the correct type
473:                    ColumnDescriptorList cdl = td.getColumnDescriptorList();
474:                    int cdlSize = cdl.size();
475:                    for (int index = 0, numSet = 0; index < cdlSize; index++) {
476:                        if (!zeroBasedBitSet.get(index)) {
477:                            continue;
478:                        }
479:                        numSet++;
480:                        ColumnDescriptor cd = (ColumnDescriptor) cdl
481:                                .elementAt(index);
482:                        DataTypeDescriptor dts = cd.getType();
483:
484:                        for (int i = 0; i < bulkFetchSize; i++) {
485:                            // Put the column in both the compact and sparse base rows
486:                            baseRows[i].setColumn(index + 1, dts.getNull());
487:                            compactBaseRows[i].setColumn(numSet, baseRows[i]
488:                                    .getColumn(index + 1));
489:                        }
490:
491:                        // Calculate the approximate row size for the index row
492:                        approximateRowSize += dts.getTypeId()
493:                                .getApproximateLengthInBytes(dts);
494:                    }
495:
496:                    // Get an array of RowLocation templates
497:                    RowLocation rl[] = new RowLocation[bulkFetchSize];
498:                    for (int i = 0; i < bulkFetchSize; i++) {
499:                        rl[i] = scan.newRowLocationTemplate();
500:
501:                        // Get an index row based on the base row
502:                        indexRowGenerator.getIndexRow(compactBaseRows[i],
503:                                rl[i], indexRows[i], bitSet);
504:                    }
505:
506:                    /* Now that we have indexTemplateRow, we are done for a duplicate index
507:                     */
508:                    if (duplicate)
509:                        return;
510:
511:                    /* For non-unique indexes, we order by all columns + the RID.
512:                     * For unique indexes, we just order by the columns.
513:                     * We create a unique index observer for unique indexes
514:                     * so that we can catch duplicate keys.
515:                     * We create a basic sort observer for non-unique indexes
516:                     * so that we can reuse the wrappers during an external
517:                     * sort.
518:                     */
519:                    int numColumnOrderings;
520:                    SortObserver sortObserver = null;
521:                    if (unique) {
522:                        numColumnOrderings = baseColumnPositions.length;
523:                        // if the index is for a constraint, use the constraint name in a possible error message
524:                        String indexOrConstraintName = indexName;
525:                        if (conglomerateUUID != null) {
526:                            ConglomerateDescriptor cd = dd
527:                                    .getConglomerateDescriptor(conglomerateUUID);
528:                            if ((isConstraint)
529:                                    && (cd != null && cd.getUUID() != null && td != null)) {
530:                                ConstraintDescriptor conDesc = dd
531:                                        .getConstraintDescriptor(td, cd
532:                                                .getUUID());
533:                                indexOrConstraintName = conDesc
534:                                        .getConstraintName();
535:                            }
536:                        }
537:                        sortObserver = new UniqueIndexSortObserver(true,
538:                                isConstraint, indexOrConstraintName,
539:                                indexTemplateRow, true, td.getName());
540:                    } else {
541:                        numColumnOrderings = baseColumnPositions.length + 1;
542:                        sortObserver = new BasicSortObserver(true, false,
543:                                indexTemplateRow, true);
544:                    }
545:                    ColumnOrdering[] order = new ColumnOrdering[numColumnOrderings];
546:                    for (int i = 0; i < numColumnOrderings; i++) {
547:                        order[i] = new IndexColumnOrder(i, unique
548:                                || i < numColumnOrderings - 1 ? isAscending[i]
549:                                : true);
550:                    }
551:
552:                    // create the sorter
553:                    sortId = tc.createSort((Properties) null, indexTemplateRow
554:                            .getRowArrayClone(), order, sortObserver, false, // not in order
555:                            scan.getEstimatedRowCount(), approximateRowSize // est row size, -1 means no idea	
556:                            );
557:
558:                    needToDropSort = true;
559:
560:                    // Populate sorter and get the output of the sorter into a row
561:                    // source.  The sorter has the indexed columns only and the columns
562:                    // are in the correct order. 
563:                    rowSource = loadSorter(baseRows, indexRows, tc, scan,
564:                            sortId, rl);
565:
566:                    conglomId = tc.createAndLoadConglomerate(indexType,
567:                            indexTemplateRow.getRowArray(), // index row template
568:                            order, // columns sort order
569:                            indexProperties, TransactionController.IS_DEFAULT, // not temporary
570:                            rowSource, (long[]) null);
571:
572:                } finally {
573:
574:                    /* close the table scan */
575:                    if (scan != null)
576:                        scan.close();
577:
578:                    /* close the sorter row source before throwing exception */
579:                    if (rowSource != null)
580:                        rowSource.closeRowSource();
581:
582:                    /*
583:                     ** drop the sort so that intermediate external sort run can be
584:                     ** removed from disk
585:                     */
586:                    if (needToDropSort)
587:                        tc.dropSort(sortId);
588:                }
589:
590:                ConglomerateController indexController = tc.openConglomerate(
591:                        conglomId, false, 0, TransactionController.MODE_TABLE,
592:                        TransactionController.ISOLATION_SERIALIZABLE);
593:
594:                // Check to make sure that the conglomerate can be used as an index
595:                if (!indexController.isKeyed()) {
596:                    indexController.close();
597:                    throw StandardException
598:                            .newException(SQLState.LANG_NON_KEYED_INDEX,
599:                                    indexName, indexType);
600:                }
601:                indexController.close();
602:
603:                //
604:                // Create a conglomerate descriptor with the conglomId filled in and
605:                // add it.
606:                //
607:
608:                ConglomerateDescriptor cgd = ddg.newConglomerateDescriptor(
609:                        conglomId, indexName, true, indexRowGenerator,
610:                        isConstraint, conglomerateUUID, td.getUUID(), sd
611:                                .getUUID());
612:
613:                dd.addDescriptor(cgd, sd,
614:                        DataDictionary.SYSCONGLOMERATES_CATALOG_NUM, false, tc);
615:
616:                // add newly added conglomerate to the list of conglomerate descriptors
617:                // in the td.
618:                ConglomerateDescriptorList cdl = td
619:                        .getConglomerateDescriptorList();
620:                cdl.add(cgd);
621:
622:                CardinalityCounter cCount = (CardinalityCounter) rowSource;
623:                long numRows;
624:                if ((numRows = cCount.getRowCount()) > 0) {
625:                    long[] c = cCount.getCardinality();
626:                    for (int i = 0; i < c.length; i++) {
627:                        StatisticsDescriptor statDesc = new StatisticsDescriptor(
628:                                dd, dd.getUUIDFactory().createUUID(), cgd
629:                                        .getUUID(), td.getUUID(), "I",
630:                                new StatisticsImpl(numRows, c[i]), i + 1);
631:                        dd.addDescriptor(statDesc, null,
632:                                DataDictionary.SYSSTATISTICS_CATALOG_NUM, true,
633:                                tc);
634:                    }
635:                }
636:            }
637:
638:            // CLASS METHODS
639:
640:            ///////////////////////////////////////////////////////////////////////
641:            //
642:            //	GETTERs called by CreateConstraint
643:            //
644:            ///////////////////////////////////////////////////////////////////////
645:            ExecRow getIndexTemplateRow() {
646:                return indexTemplateRow;
647:            }
648:
649:            /**
650:             * Do necessary clean up (close down controllers, etc.) before throwing
651:             * a statement exception.
652:             *
653:             * @param scan				ScanController for the heap
654:             * @param indexController	ConglomerateController for the index
655:             */
656:            private void statementExceptionCleanup(ScanController scan,
657:                    ConglomerateController indexController)
658:                    throws StandardException {
659:                if (indexController != null) {
660:                    indexController.close();
661:                }
662:                if (scan != null) {
663:                    scan.close();
664:                }
665:            }
666:
667:            /**
668:             * Scan the base conglomerate and insert the keys into a sorter,
669:             * returning a rowSource on the sorter. 
670:             *
671:             * @return RowSource on the sorted index keys.
672:             *
673:             * @exception StandardException					thrown on error
674:             */
675:            private RowLocationRetRowSource loadSorter(ExecRow[] baseRows,
676:                    ExecIndexRow[] indexRows, TransactionController tc,
677:                    GroupFetchScanController scan, long sortId,
678:                    RowLocation rl[]) throws StandardException {
679:                SortController sorter;
680:                long rowCount = 0;
681:
682:                sorter = tc.openSort(sortId);
683:
684:                try {
685:                    // Step through all the rows in the base table
686:                    // prepare an array of rows for bulk fetch
687:                    int bulkFetchSize = baseRows.length;
688:
689:                    if (SanityManager.DEBUG) {
690:                        SanityManager
691:                                .ASSERT(bulkFetchSize == indexRows.length,
692:                                        "number of base rows and index rows does not match");
693:                        SanityManager
694:                                .ASSERT(bulkFetchSize == rl.length,
695:                                        "number of base rows and row locations does not match");
696:                    }
697:
698:                    DataValueDescriptor[][] baseRowArray = new DataValueDescriptor[bulkFetchSize][];
699:
700:                    for (int i = 0; i < bulkFetchSize; i++)
701:                        baseRowArray[i] = baseRows[i].getRowArray();
702:
703:                    // rl[i] and baseRowArray[i] and indexRows[i] are all tied up
704:                    // beneath the surface.  Fetching the base row and row location
705:                    // from the table scan will automagically set up the indexRow.
706:                    // fetchNextGroup will return how many rows are actually fetched.
707:                    int bulkFetched = 0;
708:
709:                    while ((bulkFetched = scan.fetchNextGroup(baseRowArray, rl)) > 0) {
710:                        for (int i = 0; i < bulkFetched; i++) {
711:                            sorter.insert(indexRows[i].getRowArray());
712:                            rowCount++;
713:                        }
714:                    }
715:
716:                    /*
717:                     ** We've just done a full scan on the heap, so set the number
718:                     ** of rows so the optimizer will have an accurate count.
719:                     */
720:                    scan.setEstimatedRowCount(rowCount);
721:                } finally {
722:                    sorter.close();
723:                }
724:
725:                return new CardinalityCounter(tc.openSortRowSource(sortId));
726:            }
727:        }
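
For reference, the execution-time work in this class is what runs beneath an ordinary CREATE INDEX statement. Below is a minimal sketch of JDBC code against embedded Derby whose DDL is the kind of statement handled by CreateIndexConstantAction.executeConstantAction(); the database, table, and index names are hypothetical and not taken from the source above.

        import java.sql.Connection;
        import java.sql.DriverManager;
        import java.sql.Statement;

        public class CreateIndexExample {
            public static void main(String[] args) throws Exception {
                // Load the embedded Derby driver (required on older JDK/Derby versions).
                Class.forName("org.apache.derby.jdbc.EmbeddedDriver");

                // Hypothetical database name; ";create=true" creates it if it does not exist.
                Connection conn = DriverManager.getConnection("jdbc:derby:demoDB;create=true");
                Statement s = conn.createStatement();

                s.executeUpdate("CREATE TABLE emp (id INT NOT NULL, name VARCHAR(50))");

                // A non-unique index: at execution time Derby scans the base table,
                // sorts the keys, and builds the index conglomerate as shown in
                // executeConstantAction() above.
                s.executeUpdate("CREATE INDEX emp_name_idx ON emp (name)");

                // A unique index: corresponds to the 'unique' flag passed to the
                // CreateIndexConstantAction constructor.
                s.executeUpdate("CREATE UNIQUE INDEX emp_id_idx ON emp (id)");

                s.close();
                conn.close();
            }
        }

For the non-unique, single-column index above, the store properties described in the source count the implicit RowLocation column: nKeyFields and nUniqueColumns are both 2 (one user column plus the RID) and rowLocationColumn is 1, whereas the unique index keeps nUniqueColumns at 1.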